diff --git a/.env.example b/.env.example new file mode 100644 index 0000000..7bd3dc7 --- /dev/null +++ b/.env.example @@ -0,0 +1,15 @@ +# Lapdev API configuration + +# Required: PostgreSQL connection URL for the Lapdev database. +# Example: postgres://username:password@db-host:5432/lapdev +LAPDEV_DB_URL=postgres://username:password@localhost:5432/lapdev + +# Optional overrides with sensible defaults. +LAPDEV_BIND_ADDR=0.0.0.0 +LAPDEV_HTTP_PORT=8080 +LAPDEV_SSH_PROXY_PORT=2222 +LAPDEV_SSH_PROXY_DISPLAY_PORT=2222 +LAPDEV_PREVIEW_URL_PROXY_PORT=8443 + +# Set to force all workspaces to run under a specific OS user. +# LAPDEV_FORCE_OSUSER= diff --git a/.github/workflows/release-cli.yml b/.github/workflows/release-cli.yml new file mode 100644 index 0000000..5e8754b --- /dev/null +++ b/.github/workflows/release-cli.yml @@ -0,0 +1,246 @@ +name: Release Lapdev CLI + +on: + workflow_dispatch: + inputs: + version: + description: "Lapdev CLI version (e.g. 0.1.0). Must match Cargo.toml." + required: true + push: + tags: + - "lapdev-cli-v*" + +jobs: + prepare: + runs-on: ubuntu-latest + outputs: + version: ${{ steps.vars.outputs.version }} + tag_name: ${{ steps.vars.outputs.tag_name }} + steps: + - uses: actions/checkout@v4 + + - id: vars + run: | + if [[ "${{ github.event_name }}" == "workflow_dispatch" ]]; then + VERSION="${{ github.event.inputs.version }}" + else + VERSION="${GITHUB_REF_NAME#lapdev-cli-v}" + fi + + VERSION="${VERSION#v}" + if [[ -z "$VERSION" ]]; then + echo "Unable to determine version" >&2 + exit 1 + fi + + TAG_NAME="lapdev-cli-v${VERSION}" + + echo "version=$VERSION" >> "$GITHUB_OUTPUT" + echo "tag_name=$TAG_NAME" >> "$GITHUB_OUTPUT" + + - name: Verify Cargo version matches release version + run: | + FILE_VERSION=$(python - <<'PY' + import pathlib, tomllib + data = tomllib.loads(pathlib.Path("Cargo.toml").read_text()) + print(data["workspace"]["package"]["version"]) + PY + ) + if [[ "$FILE_VERSION" != "${{ steps.vars.outputs.version }}" ]]; then + echo 
"Cargo workspace version ${FILE_VERSION} does not match requested version ${{ steps.vars.outputs.version }}" >&2 + exit 1 + fi + + build: + needs: prepare + strategy: + fail-fast: false + matrix: + include: + - os: ubuntu-22.04 + target: x86_64-unknown-linux-gnu + archive: tar.gz + - os: ubuntu-22.04 + target: aarch64-unknown-linux-gnu + archive: tar.gz + - os: macos-13 + target: x86_64-apple-darwin + archive: tar.gz + - os: macos-14 + target: aarch64-apple-darwin + archive: tar.gz + - os: windows-latest + target: x86_64-pc-windows-msvc + archive: zip + - os: windows-latest + target: aarch64-pc-windows-msvc + archive: zip + runs-on: ${{ matrix.os }} + permissions: + contents: read + id-token: write + env: + TARGET: ${{ matrix.target }} + ARCHIVE_NAME: lapdev-cli-${{ matrix.target }}.${{ matrix.archive }} + ARTIFACT_NAME: lapdev-cli-${{ matrix.target }} + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 1 + + - uses: dtolnay/rust-toolchain@stable + with: + targets: ${{ matrix.target }} + components: "clippy,rustfmt" + + - name: Install Linux cross toolchain + if: runner.os == 'Linux' && contains(matrix.target, 'aarch64-unknown-linux-gnu') + run: | + sudo apt-get update + sudo apt-get install --yes gcc-aarch64-linux-gnu + + - name: Cargo fetch + run: cargo fetch --locked + if: runner.os != 'Windows' + + - name: Cargo fetch (Windows) + if: runner.os == 'Windows' + shell: pwsh + run: cargo fetch --locked + + - name: Build (Unix) + if: runner.os != 'Windows' + run: cargo build -p lapdev-cli --release --locked --target "${TARGET}" + + - name: Build (Windows) + if: runner.os == 'Windows' + shell: pwsh + run: cargo build -p lapdev-cli --release --locked --target $env:TARGET + + - name: Import Apple code signing cert + if: runner.os == 'macOS' + uses: apple-actions/import-codesign-certs@v3 + with: + p12-file-base64: ${{ secrets.APPLE_CODESIGN_CERT_BASE64 }} + p12-password: ${{ secrets.APPLE_CODESIGN_CERT_PASSWORD }} + + - name: Codesign binary (macOS) + if: 
runner.os == 'macOS' + env: + APPLE_CODESIGN_IDENTITY: ${{ secrets.APPLE_CODESIGN_IDENTITY }} + run: | + codesign --force --timestamp --options runtime --sign "${APPLE_CODESIGN_IDENTITY}" "target/${TARGET}/release/lapdev-cli" + + - name: Notarize macOS binary + if: runner.os == 'macOS' + env: + APPLE_ID: ${{ secrets.APPLE_NOTARIZE_APPLE_ID }} + APPLE_PASSWORD: ${{ secrets.APPLE_NOTARIZE_APP_SPECIFIC_PASSWORD }} + APPLE_TEAM_ID: ${{ secrets.APPLE_TEAM_ID }} + run: | + mkdir -p notarize + cp "target/${TARGET}/release/lapdev-cli" notarize/lapdev + ditto -c -k --keepParent notarize/lapdev notarize.zip + xcrun notarytool submit notarize.zip --apple-id "${APPLE_ID}" --team-id "${APPLE_TEAM_ID}" --password "${APPLE_PASSWORD}" --wait + xcrun stapler staple "target/${TARGET}/release/lapdev-cli" + rm -rf notarize notarize.zip + + - name: Sign Windows binary + if: runner.os == 'Windows' + shell: pwsh + env: + WINDOWS_SIGNING_CERT: ${{ secrets.WINDOWS_SIGNING_CERT_BASE64 }} + WINDOWS_SIGNING_CERT_PASSWORD: ${{ secrets.WINDOWS_SIGNING_CERT_PASSWORD }} + run: | + if (-not $env:WINDOWS_SIGNING_CERT) { + throw "WINDOWS_SIGNING_CERT_BASE64 secret is required" + } + if (-not $env:WINDOWS_SIGNING_CERT_PASSWORD) { + throw "WINDOWS_SIGNING_CERT_PASSWORD secret is required" + } + $pfxPath = Join-Path $env:RUNNER_TEMP "codesign.pfx" + [IO.File]::WriteAllBytes($pfxPath, [Convert]::FromBase64String($env:WINDOWS_SIGNING_CERT)) + $signtool = Get-ChildItem "C:\Program Files (x86)\Windows Kits\10\bin" -Filter signtool.exe -Recurse | Sort-Object FullName -Descending | Select-Object -First 1 + if (-not $signtool) { + throw "signtool.exe not found" + } + & $signtool.FullName sign /fd SHA256 /td SHA256 /tr http://timestamp.digicert.com /f $pfxPath /p $env:WINDOWS_SIGNING_CERT_PASSWORD "target\${env:TARGET}\release\lapdev-cli.exe" + Remove-Item $pfxPath -Force + + - name: Prepare artifact (Unix) + if: runner.os != 'Windows' + run: | + BIN="target/${TARGET}/release/lapdev-cli" + install -Dm755 
"${BIN}" pack/lapdev + tar -C pack -czf "${ARCHIVE_NAME}" lapdev + + - name: Prepare artifact (Windows) + if: runner.os == 'Windows' + shell: pwsh + run: | + $bin = "target\${env:TARGET}\release\lapdev-cli.exe" + New-Item -ItemType Directory -Path pack -Force | Out-Null + Copy-Item $bin "pack\lapdev.exe" -Force + Compress-Archive -Path "pack\lapdev.exe" -DestinationPath $env:ARCHIVE_NAME -Force + + - uses: sigstore/cosign-installer@v3.7.0 + + - name: Sign artifact (Unix) + if: runner.os != 'Windows' + env: + COSIGN_EXPERIMENTAL: "1" + run: | + cosign sign-blob --yes --output-signature "${ARCHIVE_NAME}.sig" --output-certificate "${ARCHIVE_NAME}.pem" "${ARCHIVE_NAME}" + + - name: Sign artifact (Windows) + if: runner.os == 'Windows' + shell: pwsh + env: + COSIGN_EXPERIMENTAL: "1" + run: | + cosign sign-blob --yes --output-signature "$env:ARCHIVE_NAME.sig" --output-certificate "$env:ARCHIVE_NAME.pem" "$env:ARCHIVE_NAME" + + - uses: actions/upload-artifact@v4 + with: + name: ${{ env.ARTIFACT_NAME }} + path: | + ${{ env.ARCHIVE_NAME }} + ${{ env.ARCHIVE_NAME }}.sig + ${{ env.ARCHIVE_NAME }}.pem + + publish: + needs: [prepare, build] + runs-on: ubuntu-latest + permissions: + contents: write + steps: + - uses: actions/checkout@v4 + + - uses: actions/download-artifact@v4 + with: + path: release + + - name: List artifacts + run: ls -R release + + - name: Create GitHub release + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + TAG_NAME: ${{ needs.prepare.outputs.tag_name }} + VERSION: ${{ needs.prepare.outputs.version }} + run: | + FILES=() + while IFS= read -r -d '' file; do + FILES+=("$file") + done < <(find release -type f -print0) + + if [[ ${#FILES[@]} -eq 0 ]]; then + echo "No artifacts found to upload" >&2 + exit 1 + fi + + FILES+=("pkg/installers/lapdev-cli.sh" "pkg/installers/lapdev-cli.ps1") + + gh release create "${TAG_NAME}" "${FILES[@]}" \ + --title "Lapdev CLI v${VERSION}" \ + --notes "Prebuilt Lapdev CLI binaries for version v${VERSION}." 
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 43de488..503eff8 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -76,6 +76,8 @@ jobs: ./target/debian/lapdev-ws_${{ github.event.inputs.lapdev_version }}-1_amd64.deb ./pkg/common/install.sh ./pkg/common/install-ws.sh + ./pkg/installers/lapdev-cli.sh + ./pkg/installers/lapdev-cli.ps1 retention-days: 1 publish: @@ -100,4 +102,6 @@ jobs: if: github.event_name != 'pull_request' run: | gh release create $TAG_NAME --title "$TAG_NAME" --target $GITHUB_SHA \ - lapdev-linux/*/*/* + lapdev-linux/*/*/* \ + pkg/installers/lapdev-cli.sh \ + pkg/installers/lapdev-cli.ps1 diff --git a/.gitignore b/.gitignore index 5c9e44f..b5e3274 100644 --- a/.gitignore +++ b/.gitignore @@ -2,3 +2,4 @@ /lapdev-dashboard/dist .lapce .env +.git-credentials diff --git a/Cargo.lock b/Cargo.lock index d4bdedb..d56e0c9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,21 +1,12 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
-version = 3 +version = 4 [[package]] -name = "addr2line" -version = "0.21.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" -dependencies = [ - "gimli", -] - -[[package]] -name = "adler" -version = "1.0.2" +name = "adler2" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" +checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" [[package]] name = "aead" @@ -29,9 +20,9 @@ dependencies = [ [[package]] name = "aes" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac1f845298e95f983ff1944b728ae08b8cebab80d684f0a832ed0fc74dfa27e2" +checksum = "b169f7a6d4742236a0a00c541b845991d0ac43e546831af1249753ab4c3aa3a0" dependencies = [ "cfg-if", "cipher", @@ -54,23 +45,23 @@ dependencies = [ [[package]] name = "ahash" -version = "0.7.7" +version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a824f2aa7e75a0c98c5a504fceb80649e9c35265d44525b5f94de4771a395cd" +checksum = "891477e0c6a8957309ee5c45a6368af3ae14bb510732d2684ffa19af310920f9" dependencies = [ - "getrandom", + "getrandom 0.2.16", "once_cell", "version_check", ] [[package]] name = "ahash" -version = "0.8.6" +version = "0.8.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91429305e9f0a25f6205c5b8e0d2db09e0708a7a6df0f42212bb56c32c8ac97a" +checksum = "5a15f179cd60c4584b8a8c596927aadc462e27f2ca70c04e0071964a73ba7a75" dependencies = [ "cfg-if", - "getrandom", + "getrandom 0.3.3", "once_cell", "version_check", "zerocopy", @@ -78,9 +69,9 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" dependencies = [ "memchr", ] @@ -93,9 +84,9 @@ checksum = "250f629c0161ad8107cf89319e990051fae62832fd343083bea452d93e2205fd" [[package]] name = "allocator-api2" -version = "0.2.16" +version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" +checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" [[package]] name = "android-tzdata" @@ -114,57 +105,70 @@ dependencies = [ [[package]] name = "anstream" -version = "0.6.11" +version = "0.6.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e2e1ebcb11de5c03c67de28a7df593d32191b44939c482e97702baaaa6ab6a5" +checksum = "301af1932e46185686725e0fad2f8f2aa7da69dd70bf6ecc44d6b703844a3933" dependencies = [ "anstyle", "anstyle-parse", "anstyle-query", "anstyle-wincon", "colorchoice", + "is_terminal_polyfill", "utf8parse", ] [[package]] name = "anstyle" -version = "1.0.4" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7079075b41f533b8c61d2a4d073c4676e1f8b249ff94a393b0595db304e0dd87" +checksum = "862ed96ca487e809f1c8e5a8447f6ee2cf102f846893800b20cebdf541fc6bbd" [[package]] name = "anstyle-parse" -version = "0.2.3" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c75ac65da39e5fe5ab759307499ddad880d724eed2f6ce5b5e8a26f4f387928c" +checksum = "4e7644824f0aa2c7b9384579234ef10eb7efb6a0deb83f9630a49594dd9c15c2" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.0.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e28923312444cdd728e4738b3f9c9cac739500909bb3d3c94b43551b16517648" +checksum = "6c8bdeb6047d8983be085bab0ba1472e6dc604e7041dbf6fcd5e71523014fae9" 
dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] name = "anstyle-wincon" -version = "3.0.2" +version = "3.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cd54b81ec8d6180e24654d0b371ad22fc3dd083b6ff8ba325b72e00c87660a7" +checksum = "403f75924867bb1033c59fbf0797484329750cfbe3c4325cd33127941fabc882" dependencies = [ "anstyle", - "windows-sys 0.52.0", + "once_cell_polyfill", + "windows-sys 0.59.0", +] + +[[package]] +name = "any_spawner" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1384d3fe1eecb464229fcf6eebb72306591c56bf27b373561489458a7c73027d" +dependencies = [ + "futures", + "thiserror 2.0.17", + "wasm-bindgen-futures", ] [[package]] name = "anyhow" -version = "1.0.66" +version = "1.0.98" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "216261ddc8289130e551ddcd5ce8a064710c0d064a4d2895c67151c92b5443f6" +checksum = "e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487" [[package]] name = "argon2" @@ -180,40 +184,194 @@ dependencies = [ [[package]] name = "arrayvec" -version = "0.7.4" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" + +[[package]] +name = "async-attributes" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3203e79f4dd9bdda415ed03cf14dae5a2bf775c683a00f94e9cd1faf0f596e5" +dependencies = [ + "quote", + "syn 1.0.109", +] + +[[package]] +name = "async-broadcast" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "435a87a52755b8f27fcf321ac4f04b2802e337c8c4872923137471ec39c37532" +dependencies = [ + "event-listener 5.4.0", + "event-listener-strategy", + "futures-core", + "pin-project-lite", +] + +[[package]] +name = "async-channel" +version = "1.9.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "81953c529336010edd6d8e358f886d9581267795c61b19475b71314bffa46d35" +dependencies = [ + "concurrent-queue", + "event-listener 2.5.3", + "futures-core", +] + +[[package]] +name = "async-channel" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" +checksum = "924ed96dd52d1b75e9c1a3e6275715fd320f5f9439fb5a4a11fa51f4221158d2" +dependencies = [ + "concurrent-queue", + "event-listener-strategy", + "futures-core", + "pin-project-lite", +] [[package]] name = "async-compression" -version = "0.4.6" +version = "0.4.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a116f46a969224200a0a97f29cfd4c50e7534e4b4826bd23ea2c3c533039c82c" +checksum = "93c1f86859c1af3d514fa19e8323147ff10ea98684e6c7b307912509f50e67b2" dependencies = [ + "compression-codecs", + "compression-core", "futures-core", - "memchr", "pin-project-lite", "tokio", - "zstd", - "zstd-safe", ] [[package]] -name = "async-recursion" -version = "1.0.5" +name = "async-executor" +version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fd55a5ba1179988837d24ab4c7cc8ed6efdeff578ede0416b4225a5fca35bd0" +checksum = "bb812ffb58524bdd10860d7d974e2f01cc0950c2438a74ee5ec2e2280c6c4ffa" dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.98", + "async-task", + "concurrent-queue", + "fastrand 2.3.0", + "futures-lite 2.6.0", + "pin-project-lite", + "slab", +] + +[[package]] +name = "async-global-executor" +version = "2.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05b1b633a2115cd122d73b955eadd9916c18c8f510ec9cd1686404c60ad1c29c" +dependencies = [ + "async-channel 2.5.0", + "async-executor", + "async-io 2.5.0", + "async-lock 3.4.0", + "blocking", + "futures-lite 2.6.0", + "once_cell", + "tokio", +] + +[[package]] +name = "async-io" +version = 
"1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af" +dependencies = [ + "async-lock 2.8.0", + "autocfg", + "cfg-if", + "concurrent-queue", + "futures-lite 1.13.0", + "log", + "parking", + "polling 2.8.0", + "rustix 0.37.28", + "slab", + "socket2 0.4.10", + "waker-fn", +] + +[[package]] +name = "async-io" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19634d6336019ef220f09fd31168ce5c184b295cbf80345437cc36094ef223ca" +dependencies = [ + "async-lock 3.4.0", + "cfg-if", + "concurrent-queue", + "futures-io", + "futures-lite 2.6.0", + "parking", + "polling 3.9.0", + "rustix 1.0.8", + "slab", + "windows-sys 0.60.2", +] + +[[package]] +name = "async-lock" +version = "2.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "287272293e9d8c41773cec55e365490fe034813a2f172f502d6ddcf75b2f582b" +dependencies = [ + "event-listener 2.5.3", +] + +[[package]] +name = "async-lock" +version = "3.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff6e472cdea888a4bd64f342f09b3f50e1886d32afe8df3d663c01140b811b18" +dependencies = [ + "event-listener 5.4.0", + "event-listener-strategy", + "pin-project-lite", +] + +[[package]] +name = "async-once-cell" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4288f83726785267c6f2ef073a3d83dc3f9b81464e9f99898240cced85fce35a" + +[[package]] +name = "async-std" +version = "1.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "730294c1c08c2e0f85759590518f6333f0d5a0a766a27d519c1b244c3dfd8a24" +dependencies = [ + "async-attributes", + "async-channel 1.9.0", + "async-global-executor", + "async-io 2.5.0", + "async-lock 3.4.0", + "crossbeam-utils", + "futures-channel", + "futures-core", + "futures-io", + "futures-lite 2.6.0", + "gloo-timers 0.3.0", + "kv-log-macro", + 
"log", + "memchr", + "once_cell", + "pin-project-lite", + "pin-utils", + "slab", + "wasm-bindgen-futures", ] [[package]] name = "async-stream" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" +checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476" dependencies = [ "async-stream-impl", "futures-core", @@ -222,24 +380,30 @@ dependencies = [ [[package]] name = "async-stream-impl" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" +checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.104", ] +[[package]] +name = "async-task" +version = "4.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de" + [[package]] name = "async-trait" -version = "0.1.74" +version = "0.1.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a66537f1bb974b254c98ed142ff995236e81b9d0fe4db0575f46612cb15eb0f9" +checksum = "e539d3fca749fcee5236ab05e93a52867dd549cc157c8cb7f99595f3cedffdb5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.104", ] [[package]] @@ -252,32 +416,30 @@ dependencies = [ ] [[package]] -name = "atomic-write-file" -version = "0.1.2" +name = "atomic-waker" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edcdbedc2236483ab103a53415653d6b4442ea6141baf1ffa85df29635e88436" -dependencies = [ - "nix", - "rand", -] +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" [[package]] name = "attribute-derive" -version = "0.8.1" +version = "0.10.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c94f43ede6f25dab1dea046bff84d85dea61bd49aba7a9011ad66c0d449077b" +checksum = "0053e96dd3bec5b4879c23a138d6ef26f2cb936c9cdc96274ac2b9ed44b5bb54" dependencies = [ "attribute-derive-macro", + "derive-where", + "manyhow", "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.104", ] [[package]] name = "attribute-derive-macro" -version = "0.8.1" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b409e2b2d2dc206d2c0ad3575a93f001ae21a1593e2d0c69b69c308e63f3b422" +checksum = "463b53ad0fd5b460af4b1915fe045ff4d946d025fb6c4dc3337752eaa980f71b" dependencies = [ "collection_literals", "interpolator", @@ -286,30 +448,55 @@ dependencies = [ "proc-macro2", "quote", "quote-use", - "syn 2.0.98", + "syn 2.0.104", ] [[package]] name = "autocfg" -version = "1.1.0" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" + +[[package]] +name = "aws-lc-rs" +version = "1.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "879b6c89592deb404ba4dc0ae6b58ffd1795c78991cbb5b8bc441c48a070440d" +dependencies = [ + "aws-lc-sys", + "untrusted 0.7.1", + "zeroize", +] + +[[package]] +name = "aws-lc-sys" +version = "0.32.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" +checksum = "107a4e9d9cab9963e04e84bb8dee0e25f2a987f9a8bad5ed054abd439caa8f8c" +dependencies = [ + "bindgen", + "cc", + "cmake", + "dunce", + "fs_extra", +] [[package]] name = "axum" -version = "0.7.4" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1236b4b292f6c4d6dc34604bb5120d85c3fe1d1aa596bd5cc52ca054d13e7b9e" +checksum = "8a18ed336352031311f4e0b4dd2ff392d4fbb370777c9d18d7fc9d7359f73871" dependencies = [ - "async-trait", "axum-core", - 
"base64 0.21.7", + "axum-macros", + "base64", "bytes", + "form_urlencoded", "futures-util", - "http 1.2.0", - "http-body 1.0.1", + "http", + "http-body", "http-body-util", - "hyper 1.6.0", + "hyper", "hyper-util", "itoa", "matchit", @@ -317,16 +504,15 @@ dependencies = [ "mime", "percent-encoding", "pin-project-lite", - "rustversion", - "serde", + "serde_core", "serde_json", "serde_path_to_error", "serde_urlencoded", "sha1", - "sync_wrapper 0.1.2", + "sync_wrapper", "tokio", - "tokio-tungstenite 0.21.0", - "tower 0.4.13", + "tokio-tungstenite", + "tower", "tower-layer", "tower-service", "tracing", @@ -334,31 +520,29 @@ dependencies = [ [[package]] name = "axum-client-ip" -version = "0.5.0" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f5ffe4637708b326c621d5494ab6c91dcf62ee440fa6ee967d289315a9c6f81" +checksum = "3f08a543641554404b42acd0d2494df12ca2be034d7b8ee4dbbf7446f940a2ef" dependencies = [ "axum", - "forwarded-header-value", + "client-ip", "serde", ] [[package]] name = "axum-core" -version = "0.4.3" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a15c63fd72d41492dc4f497196f5da1fb04fb7529e631d73630d1b491e47a2e3" +checksum = "59446ce19cd142f8833f856eb31f3eb097812d1479ab224f54d72428ca21ea22" dependencies = [ - "async-trait", "bytes", - "futures-util", - "http 1.2.0", - "http-body 1.0.1", + "futures-core", + "http", + "http-body", "http-body-util", "mime", "pin-project-lite", - "rustversion", - "sync_wrapper 0.1.2", + "sync_wrapper", "tower-layer", "tower-service", "tracing", @@ -366,58 +550,59 @@ dependencies = [ [[package]] name = "axum-extra" -version = "0.9.2" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "895ff42f72016617773af68fb90da2a9677d89c62338ec09162d4909d86fdd8f" +checksum = "5136e6c5e7e7978fe23e9876fb924af2c0f84c72127ac6ac17e7c46f457d362c" dependencies = [ "axum", "axum-core", "bytes", + "futures-core", 
"futures-util", "headers", - "http 1.2.0", - "http-body 1.0.1", + "http", + "http-body", "http-body-util", "mime", "pin-project-lite", - "serde", - "tower 0.4.13", "tower-layer", "tower-service", + "tracing", ] [[package]] -name = "backtrace" -version = "0.3.69" +name = "axum-macros" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" +checksum = "604fde5e028fea851ce1d8570bbdc034bec850d157f7569d10f347d06808c05c" dependencies = [ - "addr2line", - "cc", - "cfg-if", - "libc", - "miniz_oxide", - "object", - "rustc-demangle", + "proc-macro2", + "quote", + "syn 2.0.104", ] [[package]] -name = "base16ct" -version = "0.2.0" +name = "backon" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf" +checksum = "592277618714fbcecda9a02ba7a8781f319d26532a88553bbacc77ba5d2b3a8d" +dependencies = [ + "fastrand 2.3.0", + "gloo-timers 0.3.0", + "tokio", +] [[package]] -name = "base64" -version = "0.13.1" +name = "base16" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" +checksum = "d27c3610c36aee21ce8ac510e6224498de4228ad772a171ed65643a24693a5a8" [[package]] -name = "base64" -version = "0.21.7" +name = "base16ct" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" +checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf" [[package]] name = "base64" @@ -444,13 +629,16 @@ dependencies = [ [[package]] name = "bigdecimal" -version = "0.3.1" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6773ddc0eafc0e509fb60e48dff7f450f8e674a0686ae8605e8d9901bd5eefa" +checksum = 
"1a22f228ab7a1b23027ccc6c350b72868017af7ea8356fbdf19f8d991c690013" dependencies = [ + "autocfg", + "libm", "num-bigint", "num-integer", "num-traits", + "serde", ] [[package]] @@ -462,6 +650,26 @@ dependencies = [ "serde", ] +[[package]] +name = "bindgen" +version = "0.72.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "993776b509cfb49c750f11b8f07a46fa23e0a1386ffc01fb1e7d343efc387895" +dependencies = [ + "bitflags 2.9.1", + "cexpr", + "clang-sys", + "itertools 0.12.1", + "log", + "prettyplease", + "proc-macro2", + "quote", + "regex", + "rustc-hash", + "shlex", + "syn 2.0.104", +] + [[package]] name = "bitflags" version = "1.3.2" @@ -470,9 +678,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.6.0" +version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" +checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967" dependencies = [ "serde", ] @@ -516,6 +724,19 @@ dependencies = [ "generic-array", ] +[[package]] +name = "blocking" +version = "1.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e83f8d02be6967315521be875afa792a316e28d57b5a2d401897e2a7921b7f21" +dependencies = [ + "async-channel 2.5.0", + "async-task", + "futures-io", + "futures-lite 2.6.0", + "piper", +] + [[package]] name = "blowfish" version = "0.9.1" @@ -528,9 +749,9 @@ dependencies = [ [[package]] name = "borsh" -version = "1.5.1" +version = "1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6362ed55def622cddc70a4746a68554d7b687713770de539e59a739b249f8ed" +checksum = "ad8646f98db542e39fc66e68a20b2144f6a732636df7c2354e74645faaa433ce" dependencies = [ "borsh-derive", "cfg_aliases", @@ -538,29 +759,28 @@ dependencies = [ [[package]] name = "borsh-derive" -version = "1.5.1" +version = "1.5.7" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3ef8005764f53cd4dca619f5bf64cafd4664dada50ece25e4d81de54c80cc0b" +checksum = "fdd1d3c0c2f5833f22386f252fe8ed005c7f59fdcddeef025c01b4c3b9fd9ac3" dependencies = [ "once_cell", "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.98", - "syn_derive", + "syn 2.0.104", ] [[package]] name = "bumpalo" -version = "3.11.1" +version = "3.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "572f695136211188308f16ad2ca5c851a712c464060ae6974944458eb83880ba" +checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43" [[package]] name = "bytecheck" -version = "0.6.11" +version = "0.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b6372023ac861f6e6dc89c8344a8f398fb42aaba2b5dbc649ca0c0e9dbcb627" +checksum = "23cdc57ce23ac53c931e88a43d06d070a6fd142f2617be5855eb75efc9beb1c2" dependencies = [ "bytecheck_derive", "ptr_meta", @@ -569,13 +789,13 @@ dependencies = [ [[package]] name = "bytecheck_derive" -version = "0.6.11" +version = "0.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7ec4c6f261935ad534c0c22dbef2201b45918860eb1c574b972bd213a76af61" +checksum = "3db406d29fbcd95542e92559bed4d8ad92636d1ca8b3b72ede10b4bcc010e659" dependencies = [ "proc-macro2", "quote", - "syn 1.0.105", + "syn 1.0.109", ] [[package]] @@ -586,15 +806,15 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.10.0" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f61dac84819c6588b558454b194026eb1f09c293b9036ae9b159e74e73ab6cf9" +checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" [[package]] name = "camino" -version = "1.1.6" +version = "1.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c59e92b5a388f549b863a7bea62612c09f24c8393560709a54558a9abdfb3b9c" +checksum = "0da45bc31171d8d6960122e222a67740df867c1dd53b4d51caa297084c185cab" [[package]] name = "cbc" @@ -607,19 +827,35 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.83" +version = "1.2.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" +checksum = "deec109607ca693028562ed836a5f1c4b8bd77755c4e132fc5ce11b0b6211ae7" dependencies = [ "jobserver", "libc", + "shlex", +] + +[[package]] +name = "cesu8" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d43a04d8753f35258c91f8ec639f792891f748a1edbd759cf1dcea3382ad83c" + +[[package]] +name = "cexpr" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" +dependencies = [ + "nom", ] [[package]] name = "cfg-if" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +checksum = "9555578bc9e57714c812a1f84e4fc5b4d21fcb063490c624de019f7464c91268" [[package]] name = "cfg_aliases" @@ -640,9 +876,9 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.38" +version = "0.4.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401" +checksum = "c469d952047f47f91b68d1cba3f10d63c11d73e4636f24f08daf0278abf01c4d" dependencies = [ "android-tzdata", "iana-time-zone", @@ -650,138 +886,208 @@ dependencies = [ "num-traits", "serde", "wasm-bindgen", - "windows-targets 0.52.6", + "windows-link 0.1.3", ] [[package]] -name = "ciborium" -version = "0.2.1" +name = "cipher" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"effd91f6c78e5a4ace8a5d3c0b6bfaec9e2baaef55f3efc00e45fb2e477ee926" +checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" dependencies = [ - "ciborium-io", - "ciborium-ll", - "serde", + "crypto-common", + "inout", ] [[package]] -name = "ciborium-io" -version = "0.2.1" +name = "clang-sys" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdf919175532b369853f5d5e20b26b43112613fd6fe7aee757e35f7a44642656" +checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4" +dependencies = [ + "glob", + "libc", + "libloading", +] [[package]] -name = "ciborium-ll" -version = "0.2.1" +name = "clap" +version = "4.5.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "defaa24ecc093c77630e6c15e17c51f5e187bf35ee514f4e2d67baaa96dae22b" +checksum = "ed87a9d530bb41a67537289bafcac159cb3ee28460e0a4571123d2a778a6a882" dependencies = [ - "ciborium-io", - "half", + "clap_builder", + "clap_derive", ] [[package]] -name = "cipher" -version = "0.4.4" +name = "clap_builder" +version = "4.5.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" +checksum = "64f4f3f3c77c94aff3c7e9aac9a2ca1974a5adf392a8bb751e827d6d127ab966" dependencies = [ - "crypto-common", - "inout", + "anstream", + "anstyle", + "clap_lex", + "strsim", ] [[package]] -name = "clap" -version = "4.5.2" +name = "clap_derive" +version = "4.5.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b230ab84b0ffdf890d5a10abdbc8b83ae1c4918275daea1ab8801f71536b2651" +checksum = "ef4f52386a59ca4c860f7393bcf8abd8dfd91ecccc0f774635ff68e92eeef491" dependencies = [ - "clap_builder", - "clap_derive", + "heck 0.5.0", + "proc-macro2", + "quote", + "syn 2.0.104", ] [[package]] -name = "clap_builder" -version = "4.5.2" +name = "clap_lex" +version = "0.7.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "b94f61472cee1439c0b966b47e3aca9ae07e45d070759512cd390ea2bebc6675" + +[[package]] +name = "client-ip" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae129e2e766ae0ec03484e609954119f123cc1fe650337e155d03b022f24f7b4" +checksum = "31211fc26899744f5b22521fdc971e5f3875991d8880537537470685a0e9552d" dependencies = [ - "anstream", - "anstyle", - "clap_lex", - "strsim 0.11.0", + "http", ] [[package]] -name = "clap_derive" -version = "4.5.0" +name = "cmake" +version = "0.1.54" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "307bc0538d5f0f83b8248db3087aa92fe504e4691294d0c96c0eabc33f47ba47" +checksum = "e7caa3f9de89ddbe2c607f4101924c5abec803763ae9534e4f4d7d8f84aa81f0" dependencies = [ - "heck", - "proc-macro2", - "quote", - "syn 2.0.98", + "cc", ] [[package]] -name = "clap_lex" -version = "0.7.0" +name = "codee" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce" +checksum = "fd8bbfdadf2f8999c6e404697bc08016dce4a3d77dec465b36c9a0652fdb3327" +dependencies = [ + "serde", + "serde_json", + "thiserror 2.0.17", +] [[package]] name = "collection_literals" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "186dce98367766de751c42c4f03970fc60fc012296e706ccbb9d5df9b6c1e271" +checksum = "26b3f65b8fb8e88ba339f7d23a390fe1b0896217da05e2a66c584c9b29a91df8" [[package]] name = "colorchoice" -version = "1.0.0" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" +checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75" [[package]] -name = "config" -version = "0.13.3" +name = "colored" +version = "2.2.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d379af7f68bfc21714c6c7dea883544201741d2ce8274bb12fa54f89507f52a7" +checksum = "117725a109d387c937a1533ce01b450cbde6b88abceea8473c4d7a85853cda3c" dependencies = [ - "async-trait", "lazy_static", - "nom", + "windows-sys 0.59.0", +] + +[[package]] +name = "combine" +version = "4.6.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba5a308b75df32fe02788e748662718f03fde005016435c444eea572398219fd" +dependencies = [ + "bytes", + "memchr", +] + +[[package]] +name = "compression-codecs" +version = "0.4.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "680dc087785c5230f8e8843e2e57ac7c1c90488b6a91b88caa265410568f441b" +dependencies = [ + "compression-core", + "zstd", + "zstd-safe", +] + +[[package]] +name = "compression-core" +version = "0.4.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a9b614a5787ef0c8802a55766480563cb3a93b435898c422ed2a359cf811582" + +[[package]] +name = "concurrent-queue" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "config" +version = "0.15.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b1eb4fb07bc7f012422df02766c7bd5971effb894f573865642f06fa3265440" +dependencies = [ + "convert_case 0.6.0", "pathdiff", "serde", - "toml 0.5.11", + "toml", + "winnow", ] [[package]] name = "const-oid" -version = "0.9.5" +version = "0.9.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" + +[[package]] +name = "const-str" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28c122c3980598d243d63d9a704629a2d748d101f278052ff068be5a4423ab6f" +checksum = 
"451d0640545a0553814b4c646eb549343561618838e9b42495f466131fe3ad49" [[package]] name = "const_format" -version = "0.2.32" +version = "0.2.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3a214c7af3d04997541b18d432afaff4c455e79e2029079647e72fc2bd27673" +checksum = "126f97965c8ad46d6d9163268ff28432e8f6a1196a55578867832e3049df63dd" dependencies = [ "const_format_proc_macros", ] [[package]] name = "const_format_proc_macros" -version = "0.2.32" +version = "0.2.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7f6ff08fd20f4f299298a28e2dfa8a8ba1036e6cd2460ac1de7b425d76f2500" +checksum = "1d57c2eccfb16dbac1f4e61e206105db5820c9d26c3c472bc17c774259ef7744" dependencies = [ "proc-macro2", "quote", "unicode-xid", ] +[[package]] +name = "const_str_slice_concat" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f67855af358fcb20fac58f9d714c94e2b228fe5694c1c9b4ead4a366343eda1b" + [[package]] name = "convert_case" version = "0.6.0" @@ -791,11 +1097,20 @@ dependencies = [ "unicode-segmentation", ] +[[package]] +name = "convert_case" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baaaa0ecca5b51987b9423ccdc971514dd8b0bb7b4060b983d3664dad3f1f89f" +dependencies = [ + "unicode-segmentation", +] + [[package]] name = "core-foundation" -version = "0.9.3" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" +checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" dependencies = [ "core-foundation-sys", "libc", @@ -803,9 +1118,9 @@ dependencies = [ [[package]] name = "core-foundation" -version = "0.10.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b55271e5c8c478ad3f38ad24ef34923091e0548492a266d19b3c0b4d82574c63" +checksum = 
"b2a6cd9ae233e7f62ba4e9353e81a88df7fc8a5987b8d445b4d90c879bd156f6" dependencies = [ "core-foundation-sys", "libc", @@ -828,9 +1143,9 @@ dependencies = [ [[package]] name = "crc" -version = "3.0.1" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86ec7a15cbe22e59248fc7eadb1907dab5ba09372595da4d73dd805ed4417dfe" +checksum = "9710d3b3739c2e349eb44fe848ad0b7c8cb1e42bd87ee49371df2f7acaf3e675" dependencies = [ "crc-catalog", ] @@ -843,37 +1158,36 @@ checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" [[package]] name = "crc32fast" -version = "1.3.2" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" +checksum = "9481c1c90cbf2ac953f07c8d4a58aa3945c425b7185c9154d67a65e4230da511" dependencies = [ "cfg-if", ] [[package]] name = "crossbeam-channel" -version = "0.5.13" +version = "0.5.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33480d6946193aa8033910124896ca395333cae7e2d1113d1fef6c3272217df2" +checksum = "82b8f8f868b36967f9606790d1903570de9ceaf870a7bf9fbbd3016d636a2cb2" dependencies = [ "crossbeam-utils", ] [[package]] name = "crossbeam-queue" -version = "0.3.8" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1cfb3ea8a53f37c40dea2c7bedcbd88bdfae54f5e2175d6ecaff1c988353add" +checksum = "0f58bbc28f91df819d0aa2a2c00cd19754769c2fad90579b3592b1c9ba7a3115" dependencies = [ - "cfg-if", "crossbeam-utils", ] [[package]] name = "crossbeam-utils" -version = "0.8.20" +version = "0.8.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" +checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" [[package]] name = "crypto-bigint" @@ -882,7 +1196,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76" dependencies = [ "generic-array", - "rand_core", + "rand_core 0.6.4", "subtle", "zeroize", ] @@ -894,15 +1208,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ "generic-array", - "rand_core", "typenum", ] [[package]] name = "ct-codecs" -version = "1.1.1" +version = "1.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3b7eb4404b8195a9abb6356f4ac07d8ba267045c8d6d220ac4dc992e6cc75df" +checksum = "9b10589d1a5e400d61f9f38f12f884cfd080ff345de8f17efda36fe0e4a02aa8" [[package]] name = "ctr" @@ -923,7 +1236,7 @@ dependencies = [ "cpufeatures", "curve25519-dalek-derive", "digest", - "fiat-crypto", + "fiat-crypto 0.2.9", "rustc_version", "subtle", "zeroize", @@ -937,52 +1250,88 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.104", +] + +[[package]] +name = "darling" +version = "0.20.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc7f46116c46ff9ab3eb1597a45688b6715c6e628b5c133e288e709a29bcb4ee" +dependencies = [ + "darling_core 0.20.11", + "darling_macro 0.20.11", ] [[package]] name = "darling" -version = "0.14.4" +version = "0.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9cdf337090841a411e2a7f3deb9187445851f91b309c0c0a29e05f74a00a48c0" +dependencies = [ + "darling_core 0.21.3", + "darling_macro 0.21.3", +] + +[[package]] +name = "darling_core" +version = "0.20.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b750cb3417fd1b327431a470f388520309479ab0bf5e323505daf0290cd3850" +checksum = "0d00b9596d185e565c2207a0b01f8bd1a135483d02d9b7b0a54b11da8d53412e" dependencies = [ - "darling_core", - 
"darling_macro", + "fnv", + "ident_case", + "proc-macro2", + "quote", + "strsim", + "syn 2.0.104", ] [[package]] name = "darling_core" -version = "0.14.4" +version = "0.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0" +checksum = "1247195ecd7e3c85f83c8d2a366e4210d588e802133e1e355180a9870b517ea4" dependencies = [ "fnv", "ident_case", "proc-macro2", "quote", - "strsim 0.10.0", - "syn 1.0.105", + "strsim", + "syn 2.0.104", +] + +[[package]] +name = "darling_macro" +version = "0.20.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" +dependencies = [ + "darling_core 0.20.11", + "quote", + "syn 2.0.104", ] [[package]] name = "darling_macro" -version = "0.14.4" +version = "0.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e" +checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" dependencies = [ - "darling_core", + "darling_core 0.21.3", "quote", - "syn 1.0.105", + "syn 2.0.104", ] [[package]] name = "dashmap" -version = "5.4.0" +version = "6.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "907076dfda823b0b36d2a1bb5f90c96660a5bbcd7729e10727f07858f22c4edc" +checksum = "5041cc499144891f3790297212f32a74fb938e5136a14943f338ef9e0ae276cf" dependencies = [ "cfg-if", - "hashbrown 0.12.3", + "crossbeam-utils", + "hashbrown 0.14.5", "lock_api", "once_cell", "parking_lot_core", @@ -990,26 +1339,26 @@ dependencies = [ [[package]] name = "data-encoding" -version = "2.4.0" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2e66c9d817f1720209181c316d28635c050fa304f9c79e47a520882661b7308" +checksum = "2a2330da5de22e8a3cb63252ce2abb30116bf5265e89c0e01bc17015ce30a476" [[package]] name = 
"delegate" -version = "0.13.2" +version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "297806318ef30ad066b15792a8372858020ae3ca2e414ee6c2133b1eb9e9e945" +checksum = "6178a82cf56c836a3ba61a7935cdb1c49bfaa6fa4327cd5bf554a503087de26b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.104", ] [[package]] name = "der" -version = "0.7.8" +version = "0.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fffa369a668c8af7dbf8b5e56c9f744fbd399949ed171606040001947de40b1c" +checksum = "e7c1832837b905bbfb5101e07cc24c8deddf52f93225eee6ead5f4d63d53ddcb" dependencies = [ "const-oid", "pem-rfc7468", @@ -1018,65 +1367,75 @@ dependencies = [ [[package]] name = "deranged" -version = "0.3.9" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f32d04922c60427da6f9fef14d042d9edddef64cb9d4ce0d64d0685fbeb1fd3" +checksum = "9c9e6a11ca8224451684bc0d7d5a7adbf8f2fd6887261a1cfc3c0432f9d4068e" dependencies = [ "powerfmt", "serde", ] -[[package]] -name = "derivative" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.105", -] - [[package]] name = "derive-where" -version = "1.2.5" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "146398d62142a0f35248a608f17edf0dde57338354966d6e41d0eb2d16980ccb" +checksum = "510c292c8cf384b1a340b816a9a6cf2599eb8f566a44949024af88418000c50b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.104", ] [[package]] name = "derive_builder" -version = "0.12.0" +version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d67778784b508018359cbc8696edb3db78160bab2c2a28ba7f56ef6932997f8" +checksum = "507dfb09ea8b7fa618fcf76e953f4f5e192547945816d5358edffe39f6f94947" 
dependencies = [ "derive_builder_macro", ] [[package]] name = "derive_builder_core" -version = "0.12.0" +version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c11bdc11a0c47bc7d37d582b5285da6849c96681023680b906673c5707af7b0f" +checksum = "2d5bcf7b024d6835cfb3d473887cd966994907effbe9227e8c8219824d06c4e8" dependencies = [ - "darling", + "darling 0.20.11", "proc-macro2", "quote", - "syn 1.0.105", + "syn 2.0.104", ] [[package]] name = "derive_builder_macro" -version = "0.12.0" +version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebcda35c7a396850a55ffeac740804b40ffec779b98fffbb1738f4033f0ee79e" +checksum = "ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c" dependencies = [ "derive_builder_core", - "syn 1.0.105", + "syn 2.0.104", +] + +[[package]] +name = "derive_more" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "093242cf7570c207c83073cf82f79706fe7b8317e98620a47d5be7c3d8497678" +dependencies = [ + "derive_more-impl", +] + +[[package]] +name = "derive_more-impl" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", + "unicode-xid", ] [[package]] @@ -1091,14 +1450,46 @@ dependencies = [ "subtle", ] +[[package]] +name = "dirs" +version = "6.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3e8aa94d75141228480295a7d0e7feb620b1a5ad9f12bc40be62411e38cce4e" +dependencies = [ + "dirs-sys", +] + +[[package]] +name = "dirs-sys" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e01a3366d27ee9890022452ee61b2b63a67e6f13f58900b651ff5665f0bb1fab" +dependencies = [ + "libc", + "option-ext", + "redox_users", + "windows-sys 0.61.2", +] + +[[package]] +name = "displaydoc" +version = 
"0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + [[package]] name = "docker-compose-types" -version = "0.7.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "608ffe949fbffae4034bdde4bfa224238736ecc9e1a362997245c7282f49fa60" +checksum = "7edb75a85449fd9c34d9fb3376c6208ec4115d2ca43b965175a52d71349ecab8" dependencies = [ "derive_builder", - "indexmap 2.1.0", + "indexmap", "serde", "serde_yaml", ] @@ -1115,6 +1506,18 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "669a445ee724c5c69b1b06fe0b63e70a1c84bc9bb7d9696cd4f4e3ec45050408" +[[package]] +name = "dunce" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" + +[[package]] +name = "dyn-clone" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0881ea181b1df73ff77ffaaf9c7544ecc11e82fba9b5f27b262a3c73a332555" + [[package]] name = "ecdsa" version = "0.16.9" @@ -1141,48 +1544,71 @@ dependencies = [ [[package]] name = "ed25519-compact" -version = "2.0.6" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a667e6426df16c2ac478efa4a439d0e674cba769c5556e8cf221739251640c8c" +checksum = "e9b3460f44bea8cd47f45a0c70892f1eff856d97cd55358b2f73f663789f6190" dependencies = [ - "getrandom", + "getrandom 0.2.16", ] [[package]] name = "ed25519-dalek" -version = "2.0.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7277392b266383ef8396db7fdeb1e77b6c52fed775f5df15bb24f35b72156980" +checksum = "70e796c081cee67dc755e1a36a0a172b897fab85fc3f6bc48307991f64e4eca9" dependencies = [ "curve25519-dalek", "ed25519", - "rand_core", + 
"rand_core 0.6.4", "serde", "sha2", + "subtle", "zeroize", ] [[package]] name = "educe" -version = "0.4.20" +version = "0.5.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e4bd92664bf78c4d3dba9b7cdafce6fa15b13ed3ed16175218196942e99168a8" +dependencies = [ + "enum-ordinalize", + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "educe" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb0188e3c3ba8df5753894d54461f0e39bc91741dc5b22e1c46999ec2c71f4e4" +checksum = "1d7bc049e1bd8cdeb31b68bbd586a9464ecf9f3944af3958a7a9d0f8b9799417" dependencies = [ "enum-ordinalize", "proc-macro2", "quote", - "syn 1.0.105", + "syn 2.0.104", ] [[package]] name = "either" -version = "1.9.0" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" +checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" dependencies = [ "serde", ] +[[package]] +name = "either_of" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "216d23e0ec69759a17f05e1c553f3a6870e5ec73420fbb07807a6f34d5d1d5a4" +dependencies = [ + "paste", + "pin-project-lite", +] + [[package]] name = "elliptic-curve" version = "0.13.8" @@ -1198,33 +1624,30 @@ dependencies = [ "hkdf", "pem-rfc7468", "pkcs8", - "rand_core", + "rand_core 0.6.4", "sec1", "subtle", "zeroize", ] [[package]] -name = "encoding_rs" -version = "0.8.33" +name = "enum-ordinalize" +version = "4.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7268b386296a025e474d5140678f75d6de9493ae55a5d709eeb9dd08149945e1" +checksum = "fea0dcfa4e54eeb516fe454635a95753ddd39acda650ce703031c6973e315dd5" dependencies = [ - "cfg-if", + "enum-ordinalize-derive", ] [[package]] -name = "enum-ordinalize" -version = "3.1.12" +name = "enum-ordinalize-derive" +version = "4.3.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a62bb1df8b45ecb7ffa78dca1c17a438fb193eb083db0b1b494d2a61bcb5096a" +checksum = "0d28318a75d4aead5c4db25382e8ef717932d0346600cacae6357eb5941bc5ff" dependencies = [ - "num-bigint", - "num-traits", "proc-macro2", "quote", - "rustc_version", - "syn 1.0.105", + "syn 2.0.104", ] [[package]] @@ -1236,23 +1659,29 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.104", ] [[package]] name = "equivalent" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" +checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" + +[[package]] +name = "erased" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1731451909bde27714eacba19c2566362a7f35224f52b153d3f42cf60f72472" [[package]] name = "errno" -version = "0.3.8" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" +checksum = "778e2ac28f6c47af28e4907f13ffd1e1ddbd400980a9abd7c8df189bf578a5ad" dependencies = [ "libc", - "windows-sys 0.52.0", + "windows-sys 0.60.2", ] [[package]] @@ -1272,51 +1701,81 @@ version = "2.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" +[[package]] +name = "event-listener" +version = "5.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3492acde4c3fc54c845eaab3eed8bd00c7a7d881f78bfc801e43a93dec1331ae" +dependencies = [ + "concurrent-queue", + "parking", + "pin-project-lite", +] + +[[package]] +name = "event-listener-strategy" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"8be9f3dfaaffdae2972880079a491a1a8bb7cbed0b8dd7a347f668b4150a3b93" +dependencies = [ + "event-listener 5.4.0", + "pin-project-lite", +] + [[package]] name = "fastrand" -version = "2.0.1" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" +dependencies = [ + "instant", +] + +[[package]] +name = "fastrand" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" +checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" [[package]] name = "ff" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449" +checksum = "c0b50bfb653653f9ca9095b427bed08ab8d75a137839d9ad64eb11810d5b6393" dependencies = [ - "rand_core", + "rand_core 0.6.4", "subtle", ] [[package]] name = "fiat-crypto" -version = "0.2.3" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" + +[[package]] +name = "fiat-crypto" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f69037fe1b785e84986b4f2cbcf647381876a00671d25ceef715d7812dd7e1dd" +checksum = "64cd1e32ddd350061ae6edb1b082d7c54915b5c672c389143b9a63403a109f24" [[package]] name = "filetime" -version = "0.2.23" +version = "0.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ee447700ac8aa0b2f2bd7bc4462ad686ba06baa6727ac149a2d6277f0d240fd" +checksum = "35c0522e981e68cbfa8c3f978441a5f34b30b96e146b33cd3359176b50fe8586" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.4.1", - "windows-sys 0.52.0", + "libredox", + "windows-sys 0.59.0", ] -[[package]] -name = "finl_unicode" -version = "1.2.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fcfdc7a0362c9f4444381a9e697c79d435fe65b52a37466fc2c1184cee9edc6" - [[package]] name = "flate2" -version = "1.0.28" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46303f565772937ffe1d394a4fac6f411c6013172fadde9dcdb1e147a086940e" +checksum = "4a3d7db9596fecd151c5f638c0ee5d5bd487b6e0ea232e5dc96d5250f6f94b1d" dependencies = [ "crc32fast", "miniz_oxide", @@ -1324,13 +1783,13 @@ dependencies = [ [[package]] name = "flume" -version = "0.11.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55ac459de2512911e4b674ce33cf20befaba382d05b62b008afc1c8b57cbf181" +checksum = "da0e4dd2a88388a1f4ccc7c9ce104604dab68d9f408dc34cd45823d5a9069095" dependencies = [ "futures-core", "futures-sink", - "spin 0.9.8", + "spin", ] [[package]] @@ -1340,35 +1799,52 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] -name = "form_urlencoded" -version = "1.2.1" +name = "foldhash" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" +checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" + +[[package]] +name = "foreign-types" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" dependencies = [ - "percent-encoding", + "foreign-types-shared", ] [[package]] -name = "forwarded-header-value" +name = "foreign-types-shared" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8835f84f38484cc86f110a805655697908257fb9a7af005234060891557198e9" +checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" + +[[package]] +name = 
"form_urlencoded" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" dependencies = [ - "nonempty", - "thiserror 1.0.58", + "percent-encoding", ] [[package]] name = "fs4" -version = "0.10.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec6fcfb3c0c1d71612528825042261419d5dade9678c39a781e05b63677d9b32" +checksum = "8640e34b88f7652208ce9e88b1a37a2ae95227d84abec377ccd3c5cfeb141ed4" dependencies = [ - "rustix", + "rustix 1.0.8", "tokio", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] +[[package]] +name = "fs_extra" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" + [[package]] name = "fsevent-sys" version = "4.1.0" @@ -1386,9 +1862,9 @@ checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" [[package]] name = "futures" -version = "0.3.25" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38390104763dc37a5145a53c29c63c1290b5d316d6086ec32c293f6736051bb0" +checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" dependencies = [ "futures-channel", "futures-core", @@ -1401,9 +1877,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.29" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff4dd66668b557604244583e3e1e1eada8c5c2e96a6d0d6653ede395b78bbacb" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" dependencies = [ "futures-core", "futures-sink", @@ -1411,19 +1887,20 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.29" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb1d22c66e66d9d72e1758f0bd7d4fd0bee04cad842ee34587d68c07e45d088c" 
+checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" [[package]] name = "futures-executor" -version = "0.3.25" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7acc85df6714c176ab5edf386123fafe217be88c0840ec11f199441134a074e2" +checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" dependencies = [ "futures-core", "futures-task", "futures-util", + "num_cpus", ] [[package]] @@ -1439,38 +1916,66 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.29" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" + +[[package]] +name = "futures-lite" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bf34a163b5c4c52d0478a4d757da8fb65cabef42ba90515efee0f6f9fa45aaa" +checksum = "49a9d51ce47660b1e808d3c990b4709f2f415d928835a17dfd16991515c46bce" +dependencies = [ + "fastrand 1.9.0", + "futures-core", + "futures-io", + "memchr", + "parking", + "pin-project-lite", + "waker-fn", +] + +[[package]] +name = "futures-lite" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f5edaec856126859abb19ed65f39e90fea3a9574b9707f13539acf4abf7eb532" +dependencies = [ + "fastrand 2.3.0", + "futures-core", + "futures-io", + "parking", + "pin-project-lite", +] [[package]] name = "futures-macro" -version = "0.3.29" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53b153fd91e4b0147f4aced87be237c98248656bb01050b96bf3ee89220a8ddb" +checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.104", ] [[package]] name = "futures-sink" -version = "0.3.29" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e36d3378ee38c2a36ad710c5d30c2911d752cb941c00c72dbabfb786a7970817" +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" [[package]] name = "futures-task" -version = "0.3.29" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efd193069b0ddadc69c46389b740bbccdd97203899b48d09c5f7969591d6bae2" +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" [[package]] name = "futures-util" -version = "0.3.29" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a19526d624e703a3179b3d322efec918b6246ea0fa51d41124525f00f1cc8104" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" dependencies = [ "futures-channel", "futures-core", @@ -1497,40 +2002,48 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" +checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" dependencies = [ "cfg-if", "js-sys", "libc", - "wasi", + "wasi 0.11.1+wasi-snapshot-preview1", "wasm-bindgen", ] [[package]] -name = "ghash" -version = "0.5.0" +name = "getrandom" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d930750de5717d2dd0b8c0d42c076c0e884c81a73e6cab859bbd2339c71e3e40" +checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4" dependencies = [ - "opaque-debug", - "polyval", + "cfg-if", + "js-sys", + "libc", + "r-efi", + "wasi 0.14.2+wasi-0.2.4", + "wasm-bindgen", ] [[package]] -name = "gimli" -version = "0.28.1" +name = "ghash" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" +checksum = "f0d8a4362ccb29cb0b265253fb0a2728f592895ee6854fd9bc13f2ffda266ff1" 
+dependencies = [ + "opaque-debug", + "polyval", +] [[package]] name = "git2" -version = "0.18.2" +version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b3ba52851e73b46a4c3df1d89343741112003f0f6f13beb0dfac9e457c3fdcd" +checksum = "2deb07a133b1520dc1a5690e9bd08950108873d7ed5de38dcc74d3b5ebffa110" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.9.1", "libc", "libgit2-sys", "log", @@ -1541,62 +2054,53 @@ dependencies = [ [[package]] name = "glob" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" +checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2" [[package]] name = "gloo-net" -version = "0.2.6" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9902a044653b26b99f7e3693a42f171312d9be8b26b5697bd1e43ad1f8a35e10" +checksum = "c06f627b1a58ca3d42b45d6104bf1e1a03799df472df00988b6ba21accc10580" dependencies = [ "futures-channel", "futures-core", "futures-sink", - "gloo-utils 0.1.7", + "gloo-utils", + "http", "js-sys", "pin-project", "serde", "serde_json", - "thiserror 1.0.58", + "thiserror 1.0.69", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", ] [[package]] -name = "gloo-net" -version = "0.5.0" +name = "gloo-timers" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43aaa242d1239a8822c15c645f02166398da4f8b5c4bae795c1f5b44e9eee173" +checksum = "9b995a66bb87bebce9a0f4a95aed01daca4872c050bfcb21653361c03bc35e5c" dependencies = [ "futures-channel", "futures-core", - "futures-sink", - "gloo-utils 0.2.0", - "http 0.2.9", "js-sys", - "pin-project", - "serde", - "serde_json", - "thiserror 1.0.58", "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", ] [[package]] -name = "gloo-utils" -version = "0.1.7" +name = "gloo-timers" +version = "0.3.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "037fcb07216cb3a30f7292bd0176b050b7b9a052ba830ef7d5d65f6dc64ba58e" +checksum = "bbb143cf96099802033e0d4f4963b19fd2e0b728bcf076cd9cf7f6634f092994" dependencies = [ + "futures-channel", + "futures-core", "js-sys", - "serde", - "serde_json", "wasm-bindgen", - "web-sys", ] [[package]] @@ -1619,92 +2123,86 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" dependencies = [ "ff", - "rand_core", + "rand_core 0.6.4", "subtle", ] [[package]] -name = "h2" -version = "0.3.21" +name = "guardian" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91fc23aa11be92976ef4729127f1a74adf36d8436f7816b185d18df956790833" -dependencies = [ - "bytes", - "fnv", - "futures-core", - "futures-sink", - "futures-util", - "http 0.2.9", - "indexmap 1.9.3", - "slab", - "tokio", - "tokio-util", - "tracing", -] +checksum = "17e2ac29387b1aa07a1e448f7bb4f35b500787971e965b02842b900afa5c8f6f" [[package]] name = "h2" -version = "0.4.2" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31d030e59af851932b72ceebadf4a2b5986dba4c3b99dd2493f8273a0f151943" +checksum = "17da50a276f1e01e0ba6c029e47b7100754904ee8a278f886546e98575380785" dependencies = [ + "atomic-waker", "bytes", "fnv", "futures-core", "futures-sink", - "futures-util", - "http 1.2.0", - "indexmap 2.1.0", + "http", + "indexmap", "slab", "tokio", "tokio-util", "tracing", ] -[[package]] -name = "half" -version = "1.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" - [[package]] name = "hashbrown" version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" dependencies = [ - "ahash 0.7.7", + "ahash 
0.7.8", ] [[package]] name = "hashbrown" -version = "0.14.2" +version = "0.14.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" + +[[package]] +name = "hashbrown" +version = "0.15.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f93e7192158dbcda357bdec5fb5788eebf8bbac027f3f33e719d29135ae84156" +checksum = "5971ac85611da7067dbfcabef3c70ebb5606018acd9e2a3903a0da507521e0d5" dependencies = [ - "ahash 0.8.6", "allocator-api2", + "equivalent", + "foldhash", ] +[[package]] +name = "hashbrown" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5419bdc4f6a9207fbeba6d11b604d481addf78ecd10c11ad51e76c2f6482748d" + [[package]] name = "hashlink" -version = "0.8.4" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7" +checksum = "7382cf6263419f2d8df38c55d7da83da5c18aef87fc7a7fc1fb1e344edfe14c1" dependencies = [ - "hashbrown 0.14.2", + "hashbrown 0.15.4", ] [[package]] name = "headers" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "322106e6bd0cba2d5ead589ddb8150a13d7c4217cf80d7c4f682ca994ccc6aa9" +checksum = "b3314d5adb5d94bcdf56771f2e50dbbc80bb4bdf88967526706205ac9eff24eb" dependencies = [ - "base64 0.21.7", + "base64", "bytes", "headers-core", - "http 1.2.0", + "http", "httpdate", "mime", "sha1", @@ -1716,7 +2214,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "54b4a22553d4242c49fddb9ba998a99962b5cc6f22cb5a3482bec22522403ce4" dependencies = [ - "http 1.2.0", + "http", ] [[package]] @@ -1724,18 +2222,24 @@ name = "heck" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" 
-dependencies = [ - "unicode-segmentation", -] + +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" [[package]] name = "hermit-abi" -version = "0.1.19" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" -dependencies = [ - "libc", -] +checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" + +[[package]] +name = "hermit-abi" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c" [[package]] name = "hex" @@ -1751,9 +2255,9 @@ checksum = "6fe2267d4ed49bc07b63801559be28c718ea06c4738b7a03c94df7386d2cde46" [[package]] name = "hkdf" -version = "0.12.3" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "791a029f6b9fc27657f6f188ec6e5e43f6911f6f878e0dc5501396e09809d437" +checksum = "7b5f8eb2ad728638ea2c7d47a21db23b7b58a72ed6a38256b8a1849f15fbbdf7" dependencies = [ "hmac", ] @@ -1769,55 +2273,44 @@ dependencies = [ [[package]] name = "home" -version = "0.5.5" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5444c27eef6923071f7ebcc33e3444508466a76f7a2b93da00ed6e19f30c1ddb" +checksum = "589533453244b0995c858700322199b2becb13b627df2851f64a2775d024abcf" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.59.0", ] [[package]] -name = "html-escape" -version = "0.2.13" +name = "hostname" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d1ad449764d627e22bfd7cd5e8868264fc9236e07c752972b4080cd351cb476" +checksum = "a56f203cd1c76362b69e3863fd987520ac36cf70a8c92627449b2f64a8cf7d65" dependencies = [ - "utf8-width", + "cfg-if", + "libc", + "windows-link 0.1.3", ] 
[[package]] -name = "http" -version = "0.2.9" +name = "html-escape" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482" +checksum = "6d1ad449764d627e22bfd7cd5e8868264fc9236e07c752972b4080cd351cb476" dependencies = [ - "bytes", - "fnv", - "itoa", + "utf8-width", ] [[package]] name = "http" -version = "1.2.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f16ca2af56261c99fba8bac40a10251ce8188205a4c448fbb745a2e4daa76fea" +checksum = "f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565" dependencies = [ "bytes", "fnv", "itoa", ] -[[package]] -name = "http-body" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" -dependencies = [ - "bytes", - "http 0.2.9", - "pin-project-lite", -] - [[package]] name = "http-body" version = "1.0.1" @@ -1825,33 +2318,33 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" dependencies = [ "bytes", - "http 1.2.0", + "http", ] [[package]] name = "http-body-util" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f" +checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" dependencies = [ "bytes", - "futures-util", - "http 1.2.0", - "http-body 1.0.1", + "futures-core", + "http", + "http-body", "pin-project-lite", ] [[package]] name = "http-range-header" -version = "0.4.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ce4ef31cda248bbdb6e6820603b82dfcd9e833db65a43e997a0ccec777d11fe" +checksum = "9171a2ea8a68358193d15dd5d70c1c10a2afc3e7e4c5bc92bc9f025cebd7359c" [[package]] 
name = "httparse" -version = "1.10.0" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2d708df4e7140240a16cd6ab0ab65c972d7433ab77819ea693fde9c43811e2a" +checksum = "6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87" [[package]] name = "httpdate" @@ -1861,106 +2354,64 @@ checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "humantime" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" +checksum = "9b112acc8b3adf4b107a8ec20977da0273a8c386765a3ec0229bd500a1443f9f" [[package]] -name = "hyper" -version = "0.14.27" +name = "hydration_context" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffb1cfd654a8219eaef89881fdb3bb3b1cdc5fa75ded05d6933b2b382e395468" +checksum = "e8714ae4adeaa846d838f380fbd72f049197de629948f91bf045329e0cf0a283" dependencies = [ - "bytes", - "futures-channel", - "futures-core", - "futures-util", - "h2 0.3.21", - "http 0.2.9", - "http-body 0.4.5", - "httparse", - "httpdate", - "itoa", + "futures", + "once_cell", + "or_poisoned", "pin-project-lite", - "socket2 0.4.7", - "tokio", - "tower-service", - "tracing", - "want", + "serde", + "throw_error", ] [[package]] name = "hyper" -version = "1.6.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc2b571658e38e0c01b1fdca3bbbe93c00d3d71693ff2770043f8c29bc7d6f80" +checksum = "eb3aa54a13a0dfe7fbe3a59e0c76093041720fdc77b110cc0fc260fafb4dc51e" dependencies = [ + "atomic-waker", "bytes", "futures-channel", - "futures-util", - "h2 0.4.2", - "http 1.2.0", - "http-body 1.0.1", + "futures-core", + "h2", + "http", + "http-body", "httparse", "httpdate", "itoa", "pin-project-lite", + "pin-utils", "smallvec", "tokio", "want", ] -[[package]] -name = "hyper-http-proxy" -version = "1.0.0" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d06dbdfbacf34d996c6fb540a71a684a7aae9056c71951163af8a8a4c07b9a4" -dependencies = [ - "bytes", - "futures-util", - "headers", - "http 1.2.0", - "hyper 1.6.0", - "hyper-rustls 0.27.5", - "hyper-util", - "pin-project-lite", - "rustls-native-certs 0.7.3", - "tokio", - "tokio-rustls 0.26.1", - "tower-service", -] - -[[package]] -name = "hyper-rustls" -version = "0.24.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" -dependencies = [ - "futures-util", - "http 0.2.9", - "hyper 0.14.27", - "rustls 0.21.8", - "tokio", - "tokio-rustls 0.24.1", -] - [[package]] name = "hyper-rustls" -version = "0.27.5" +version = "0.27.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d191583f3da1305256f22463b9bb0471acad48a4e534a5218b9963e9c1f59b2" +checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58" dependencies = [ - "futures-util", - "http 1.2.0", - "hyper 1.6.0", + "http", + "hyper", "hyper-util", "log", - "rustls 0.23.23", - "rustls-native-certs 0.8.1", + "rustls", + "rustls-native-certs", "rustls-pki-types", "tokio", - "tokio-rustls 0.26.1", + "tokio-rustls", "tower-service", + "webpki-roots 1.0.2", ] [[package]] @@ -1969,7 +2420,7 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0" dependencies = [ - "hyper 1.6.0", + "hyper", "hyper-util", "pin-project-lite", "tokio", @@ -1978,18 +2429,23 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.10" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df2dcfbe0677734ab2f3ffa7fa7bfd4706bfdc1ef393f2ee30184aed67e631b4" +checksum = "3c6995591a8f1380fcb4ba966a252a4b29188d51d2b89e3a252f5305be65aea8" dependencies = [ + "base64", "bytes", 
"futures-channel", + "futures-core", "futures-util", - "http 1.2.0", - "http-body 1.0.1", - "hyper 1.6.0", + "http", + "http-body", + "hyper", + "ipnet", + "libc", + "percent-encoding", "pin-project-lite", - "socket2 0.5.5", + "socket2 0.6.0", "tokio", "tower-service", "tracing", @@ -2002,7 +2458,7 @@ source = "git+https://github.com/softprops/hyperlocal?rev=70c0b8e4007b96ed392be7 dependencies = [ "hex", "http-body-util", - "hyper 1.6.0", + "hyper", "hyper-util", "pin-project-lite", "tokio", @@ -2011,16 +2467,17 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.58" +version = "0.1.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8326b86b6cff230b97d0d312a6c40a60726df3332e721f72a1b035f451663b20" +checksum = "b0c919e5debc312ad217002b8048a17b7d83f80703865bbfcfebb0458b0b27d8" dependencies = [ "android_system_properties", "core-foundation-sys", "iana-time-zone-haiku", "js-sys", + "log", "wasm-bindgen", - "windows-core 0.51.1", + "windows-core 0.61.2", ] [[package]] @@ -2033,74 +2490,162 @@ dependencies = [ ] [[package]] -name = "ident_case" -version = "1.0.1" +name = "icu_collections" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" +checksum = "200072f5d0e3614556f94a9930d5dc3e0662a652823904c3a75dc3b0af7fee47" +dependencies = [ + "displaydoc", + "potential_utf", + "yoke", + "zerofrom", + "zerovec", +] [[package]] -name = "idna" -version = "0.3.0" +name = "icu_locale_core" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6" +checksum = "0cde2700ccaed3872079a65fb1a78f6c0a36c91570f28755dda67bc8f7d9f00a" dependencies = [ - "unicode-bidi", - "unicode-normalization", + "displaydoc", + "litemap", + "tinystr", + "writeable", + "zerovec", ] [[package]] -name = "include_dir" -version = "0.7.3" +name = 
"icu_normalizer" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18762faeff7122e89e0857b02f7ce6fcc0d101d5e9ad2ad7846cc01d61b7f19e" +checksum = "436880e8e18df4d7bbc06d58432329d6458cc84531f7ac5f024e93deadb37979" dependencies = [ - "include_dir_macros", + "displaydoc", + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", + "smallvec", + "zerovec", ] [[package]] -name = "include_dir_macros" -version = "0.7.3" +name = "icu_normalizer_data" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b139284b5cf57ecfa712bcc66950bb635b31aff41c188e8a4cfc758eca374a3f" -dependencies = [ - "proc-macro2", - "quote", -] +checksum = "00210d6893afc98edb752b664b8890f0ef174c8adbb8d0be9710fa66fbbf72d3" [[package]] -name = "indexmap" -version = "1.9.3" +name = "icu_properties" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" +checksum = "016c619c1eeb94efb86809b015c58f479963de65bdb6253345c1a1276f22e32b" dependencies = [ - "autocfg", - "hashbrown 0.12.3", + "displaydoc", + "icu_collections", + "icu_locale_core", + "icu_properties_data", + "icu_provider", + "potential_utf", + "zerotrie", + "zerovec", ] [[package]] -name = "indexmap" -version = "2.1.0" +name = "icu_properties_data" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d530e1a18b1cb4c484e6e34556a0d948706958449fca0cab753d649f2bce3d1f" -dependencies = [ - "equivalent", - "hashbrown 0.14.2", - "serde", -] +checksum = "298459143998310acd25ffe6810ed544932242d3f07083eee1084d83a71bd632" [[package]] -name = "inherent" -version = "1.0.10" +name = "icu_provider" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce243b1bfa62ffc028f1cc3b6034ec63d649f3031bc8a4fbbb004e1ac17d1f68" +checksum = 
"03c80da27b5f4187909049ee2d72f276f0d9f99a42c306bd0131ecfe04d8e5af" dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.98", + "displaydoc", + "icu_locale_core", + "stable_deref_trait", + "tinystr", + "writeable", + "yoke", + "zerofrom", + "zerotrie", + "zerovec", ] [[package]] -name = "inotify" +name = "ident_case" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" + +[[package]] +name = "idna" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e" +dependencies = [ + "idna_adapter", + "smallvec", + "utf8_iter", +] + +[[package]] +name = "idna_adapter" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3acae9609540aa318d1bc588455225fb2085b9ed0c4f6bd0d9d5bcd86f1a0344" +dependencies = [ + "icu_normalizer", + "icu_properties", +] + +[[package]] +name = "include_dir" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "923d117408f1e49d914f1a379a309cffe4f18c05cf4e3d12e613a15fc81bd0dd" +dependencies = [ + "include_dir_macros", +] + +[[package]] +name = "include_dir_macros" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7cab85a7ed0bd5f0e76d93846e0147172bed2e2d3f859bcc33a8d9699cad1a75" +dependencies = [ + "proc-macro2", + "quote", +] + +[[package]] +name = "indexmap" +version = "2.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6717a8d2a5a929a1a2eb43a12812498ed141a0bcfb7e8f7844fbdbe4303bba9f" +dependencies = [ + "equivalent", + "hashbrown 0.16.0", + "serde", + "serde_core", +] + +[[package]] +name = "inherent" +version = "1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c38228f24186d9cc68c729accb4d413be9eaed6ad07ff79e0270d9e56f3de13" 
+dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "inotify" version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8069d3ec154eb856955c1c0fbffefbf5f3c40a104ec912d4797314c1801abff" @@ -2121,19 +2666,28 @@ dependencies = [ [[package]] name = "inout" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5" +checksum = "879f10e63c20629ecabbb64a8010319738c66a5cd0c29b02d63d272b03751d01" dependencies = [ "block-padding", "generic-array", ] +[[package]] +name = "instant" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" +dependencies = [ + "cfg-if", +] + [[package]] name = "internal-russh-forked-ssh-key" -version = "0.6.9+upstream-0.6.7" +version = "0.6.11+upstream-0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb5af01d366561582e9ea5f841837cc1d8e37e7142a32f33a43801e81863cba5" +checksum = "e0a77eae781ed6a7709fb15b64862fcca13d886b07c7e2786f5ed34e5e2b9187" dependencies = [ "argon2", "bcrypt-pbkdf", @@ -2145,7 +2699,7 @@ dependencies = [ "p256", "p384", "p521", - "rand_core", + "rand_core 0.6.4", "rsa", "sec1", "sha1", @@ -2164,59 +2718,116 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "71dd52191aae121e8611f1e8dc3e324dd0dd1dee1e6dd91d10ee07a3cfb4d9d8" [[package]] -name = "inventory" -version = "0.3.13" +name = "io-lifetimes" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0508c56cfe9bfd5dfeb0c22ab9a6abfda2f27bdca422132e494266351ed8d83c" +checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" +dependencies = [ + "hermit-abi 0.3.9", + "libc", + "windows-sys 0.48.0", +] [[package]] name = "ipnet" -version = "2.9.0" +version 
= "2.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" + +[[package]] +name = "iri-string" +version = "0.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" +checksum = "4f867b9d1d896b67beb18518eda36fdb77a32ea590de864f1325b294a6d14397" +dependencies = [ + "memchr", + "serde", +] + +[[package]] +name = "is_terminal_polyfill" +version = "1.70.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" [[package]] name = "itertools" -version = "0.11.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1c173a5686ce8bfa551b3563d0c2170bf24ca44da99c7ca4bfdab5418c3fe57" +checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" dependencies = [ "either", ] [[package]] name = "itertools" -version = "0.12.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25db6b064527c5d482d0423354fcd07a89a2dfe07b67892e62411946db7f07b0" +checksum = "2b192c782037fadd9cfa75548310488aabdbf3d2da73885b31bd0abd03351285" dependencies = [ "either", ] [[package]] name = "itoa" -version = "1.0.9" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" +checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" + +[[package]] +name = "jni" +version = "0.21.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a87aa2bb7d2af34197c04845522473242e1aa17c12f4935d5856491a7fb8c97" +dependencies = [ + "cesu8", + "cfg-if", + "combine", + "jni-sys", + "log", + "thiserror 1.0.69", + "walkdir", + "windows-sys 0.45.0", +] + +[[package]] +name = "jni-sys" +version = 
"0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130" [[package]] name = "jobserver" -version = "0.1.27" +version = "0.1.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c37f63953c4c63420ed5fd3d6d398c719489b9f872b9fa683262f8edd363c7d" +checksum = "38f262f097c174adebe41eb73d66ae9c06b2844fb0da69969647bbddd9b0538a" dependencies = [ + "getrandom 0.3.3", "libc", ] [[package]] name = "js-sys" -version = "0.3.72" +version = "0.3.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a88f1bda2bd75b0452a14784937d796722fdebfe50df998aeb3f0b7603019a9" +checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" dependencies = [ + "once_cell", "wasm-bindgen", ] +[[package]] +name = "json-patch" +version = "4.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "159294d661a039f7644cea7e4d844e6b25aaf71c1ffe9d73a96d768c24b0faf4" +dependencies = [ + "jsonptr", + "serde", + "serde_json", + "thiserror 1.0.69", +] + [[package]] name = "json5" version = "0.4.1" @@ -2238,27 +2849,51 @@ dependencies = [ "pest_derive", "regex", "serde_json", - "thiserror 2.0.11", + "thiserror 2.0.17", +] + +[[package]] +name = "jsonptr" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5a3cc660ba5d72bce0b3bb295bf20847ccbb40fd423f3f05b61273672e561fe" +dependencies = [ + "serde", + "serde_json", ] [[package]] name = "k8s-openapi" -version = "0.24.0" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c75b990324f09bef15e791606b7b7a296d02fc88a344f6eba9390970a870ad5" +checksum = "d13f06d5326a915becaffabdfab75051b8cdc260c2a5c06c0e90226ede89a692" dependencies = [ - "base64 0.22.1", + "base64", "chrono", "serde", - "serde-value", "serde_json", ] +[[package]] +name = "keyring" +version = "3.6.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "eebcc3aff044e5944a8fbaf69eb277d11986064cba30c468730e8b9909fb551c" +dependencies = [ + "byteorder", + "linux-keyutils", + "log", + "security-framework 2.11.1", + "security-framework 3.2.0", + "windows-sys 0.60.2", + "zeroize", +] + [[package]] name = "kqueue" -version = "1.0.8" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7447f1ca1b7b563588a205fe93dea8df60fd981423a768bc1c0ded35ed147d0c" +checksum = "eac30106d7dce88daf4a3fcb4879ea939476d5074a9b7ddd0fb97fa4bed5596a" dependencies = [ "kqueue-sys", "libc", @@ -2276,89 +2911,146 @@ dependencies = [ [[package]] name = "kube" -version = "0.98.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32053dc495efad4d188c7b33cc7c02ef4a6e43038115348348876efd39a53cba" +checksum = "48e7bb0b6a46502cc20e4575b6ff401af45cfea150b34ba272a3410b78aa014e" dependencies = [ "k8s-openapi", "kube-client", "kube-core", + "kube-derive", + "kube-runtime", ] [[package]] name = "kube-client" -version = "0.98.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d34ad38cdfbd1fa87195d42569f57bb1dda6ba5f260ee32fef9570b7937a0c9" +checksum = "4987d57a184d2b5294fdad3d7fc7f278899469d21a4da39a8f6ca16426567a36" dependencies = [ - "base64 0.22.1", + "base64", "bytes", "chrono", "either", "futures", "home", - "http 1.2.0", - "http-body 1.0.1", + "http", + "http-body", "http-body-util", - "hyper 1.6.0", - "hyper-http-proxy", - "hyper-rustls 0.27.5", + "hyper", + "hyper-rustls", "hyper-timeout", "hyper-util", "jsonpath-rust", "k8s-openapi", "kube-core", "pem", - "rustls 0.23.23", - "rustls-pemfile 2.1.0", + "rustls", "secrecy", "serde", "serde_json", "serde_yaml", - "thiserror 2.0.11", + "thiserror 2.0.17", "tokio", "tokio-util", - "tower 0.5.2", - "tower-http 0.6.2", + "tower", + "tower-http", "tracing", ] [[package]] name = "kube-core" -version = 
"0.98.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97aa830b288a178a90e784d1b0f1539f2d200d2188c7b4a3146d9dc983d596f3" +checksum = "914bbb770e7bb721a06e3538c0edd2babed46447d128f7c21caa68747060ee73" dependencies = [ "chrono", + "derive_more", "form_urlencoded", - "http 1.2.0", + "http", + "json-patch", "k8s-openapi", + "schemars", "serde", "serde-value", "serde_json", - "thiserror 2.0.11", + "thiserror 2.0.17", +] + +[[package]] +name = "kube-derive" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03dee8252be137772a6ab3508b81cd797dee62ee771112a2453bc85cbbe150d2" +dependencies = [ + "darling 0.21.3", + "proc-macro2", + "quote", + "serde", + "serde_json", + "syn 2.0.104", +] + +[[package]] +name = "kube-runtime" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6aea4de4b562c5cc89ab10300bb63474ae1fa57ff5a19275f2e26401a323e3fd" +dependencies = [ + "ahash 0.8.12", + "async-broadcast", + "async-stream", + "backon", + "educe 0.6.0", + "futures", + "hashbrown 0.15.4", + "hostname", + "json-patch", + "k8s-openapi", + "kube-client", + "parking_lot", + "pin-project", + "serde", + "serde_json", + "thiserror 2.0.17", + "tokio", + "tokio-util", + "tracing", +] + +[[package]] +name = "kv-log-macro" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0de8b303297635ad57c9f5059fd9cee7a47f8e8daa09df0fcd07dd39fb22977f" +dependencies = [ + "log", ] [[package]] name = "lapdev" -version = "0.1.0" +version = "0.2.0" dependencies = [ "anyhow", + "dotenvy", "lapdev-api", + "lapdev-cli", "lapdev-db", "lapdev-guest-agent", "lapdev-kube", + "lapdev-kube-manager", "lapdev-ws", - "sea-orm-migration", + "sea-orm-cli", "tokio", "tracing", "tracing-appender", + "tracing-journald", "tracing-subscriber", ] [[package]] name = "lapdev-api" -version = "0.1.0" +version = "0.2.0" dependencies = [ "anyhow", 
"axum", @@ -2370,67 +3062,131 @@ dependencies = [ "futures", "futures-util", "git2", - "hyper 1.6.0", + "hyper", "hyper-util", "include_dir", - "itertools 0.12.0", + "itertools 0.14.0", + "k8s-openapi", + "lapdev-api-hrpc", "lapdev-common", "lapdev-conductor", "lapdev-db", + "lapdev-db-entities", + "lapdev-devbox-rpc", "lapdev-enterprise", "lapdev-kube", + "lapdev-kube-rpc", "lapdev-proxy-http", "lapdev-proxy-ssh", "lapdev-rpc", + "lapdev-tunnel", "oauth2", "pasetors", + "pin-project", + "regex", "reqwest", "russh", - "rustls-pemfile 2.1.0", - "rustls-webpki 0.102.8", + "rustls-pemfile", + "rustls-webpki", "sea-orm", + "secrecy", "serde", "serde_json", + "serde_yaml", "sqlx", + "tarpc", "tokio", - "tokio-rustls 0.25.0", - "toml 0.8.11", - "tower 0.4.13", - "tower-http 0.5.2", + "tokio-rustls", + "tokio-stream", + "tokio-tungstenite", + "tokio-util", + "toml", + "tower", + "tower-http", "tracing", "tracing-appender", "tracing-subscriber", "uuid", ] +[[package]] +name = "lapdev-api-hrpc" +version = "0.2.0" +dependencies = [ + "lapdev-common", + "lapdev-hrpc", + "serde", + "uuid", +] + +[[package]] +name = "lapdev-cli" +version = "0.2.0" +dependencies = [ + "anyhow", + "bytes", + "chrono", + "clap", + "colored", + "dirs", + "futures", + "futures-util", + "hostname", + "http", + "keyring", + "lapdev-common", + "lapdev-devbox-rpc", + "lapdev-rpc", + "lapdev-tunnel", + "quinn", + "reqwest", + "serde", + "serde_json", + "tarpc", + "tempfile", + "thiserror 2.0.17", + "tokio", + "tokio-tungstenite", + "tokio-util", + "tracing", + "tracing-appender", + "tracing-subscriber", + "urlencoding", + "uuid", + "webbrowser", + "windows-sys 0.59.0", +] + [[package]] name = "lapdev-common" -version = "0.1.0" +version = "0.2.0" dependencies = [ "anyhow", "base16ct", "chrono", - "rand", + "rand 0.9.2", + "secrecy", "serde", "serde_json", "sha2", - "strum 0.26.1", + "strum 0.27.2", "strum_macros", "uuid", ] [[package]] name = "lapdev-conductor" -version = "0.1.0" +version = "0.2.0" 
dependencies = [ "anyhow", "chrono", "data-encoding", "futures", "git2", - "itertools 0.12.0", + "itertools 0.14.0", "lapdev-common", "lapdev-db", + "lapdev-db-entities", "lapdev-enterprise", "lapdev-rpc", "russh", @@ -2449,18 +3205,23 @@ dependencies = [ [[package]] name = "lapdev-dashboard" -version = "0.1.0" +version = "0.2.0" dependencies = [ "anyhow", "chrono", "futures", - "gloo-net 0.5.0", + "getrandom 0.3.3", + "gloo-net", + "gloo-timers 0.2.6", + "lapdev-api-hrpc", "lapdev-common", "leptos", "leptos_router", + "lucide-leptos", "rust_decimal", "serde", "serde_json", + "tailwind_fuse", "urlencoding", "uuid", "wasm-bindgen", @@ -2469,35 +3230,76 @@ dependencies = [ [[package]] name = "lapdev-db" -version = "0.1.0" +version = "0.2.0" dependencies = [ "anyhow", - "base64 0.22.1", + "base64", "chrono", + "hex", "lapdev-common", + "lapdev-db-entities", + "lapdev-db-migration", "pasetors", "sea-orm", "sea-orm-migration", + "secrecy", + "serde", + "serde_json", + "serde_yaml", "sqlx", "tokio", + "tracing", + "uuid", +] + +[[package]] +name = "lapdev-db-entities" +version = "0.2.0" +dependencies = [ + "sea-orm", + "sea-orm-cli", + "tokio", +] + +[[package]] +name = "lapdev-db-migration" +version = "0.2.0" +dependencies = [ + "lapdev-common", + "lapdev-db-entities", + "sea-orm-cli", + "sea-orm-migration", + "tokio", + "uuid", +] + +[[package]] +name = "lapdev-devbox-rpc" +version = "0.2.0" +dependencies = [ + "chrono", + "lapdev-common", + "serde", + "tarpc", "uuid", ] [[package]] name = "lapdev-enterprise" -version = "0.1.0" +version = "0.2.0" dependencies = [ "anyhow", - "base64 0.22.1", + "base64", "chrono", "lapdev-common", "lapdev-db", + "lapdev-db-entities", "lapdev-rpc", "pasetors", "sea-orm", "serde", "serde_json", - "strum 0.26.1", + "strum 0.27.2", "strum_macros", "tokio", "uuid", @@ -2505,37 +3307,174 @@ dependencies = [ [[package]] name = "lapdev-guest-agent" -version = "0.1.0" +version = "0.2.0" +dependencies = [ + "serde_json", + "tracing", +] + 
+[[package]] +name = "lapdev-hrpc" +version = "0.2.0" +dependencies = [ + "anyhow", + "gloo-net", + "http", + "lapdev-hrpc-proc-macro", + "reqwest", + "serde", + "serde_json", +] + +[[package]] +name = "lapdev-hrpc-proc-macro" +version = "0.2.0" +dependencies = [ + "lapdev-hrpc", + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "lapdev-kube" +version = "0.2.0" +dependencies = [ + "anyhow", + "axum", + "base64", + "chrono", + "futures", + "httparse", + "k8s-openapi", + "kube", + "lapdev-common", + "lapdev-db", + "lapdev-db-entities", + "lapdev-kube-rpc", + "lapdev-rpc", + "lapdev-tunnel", + "pasetors", + "pem", + "reqwest", + "sea-orm", + "serde", + "serde_json", + "serde_yaml", + "tarpc", + "thiserror 2.0.17", + "tokio", + "tokio-tungstenite", + "tracing", + "uuid", + "yup-oauth2", +] + +[[package]] +name = "lapdev-kube-manager" +version = "0.2.0" +dependencies = [ + "anyhow", + "bytes", + "chrono", + "futures", + "futures-util", + "k8s-openapi", + "kube", + "lapdev-common", + "lapdev-kube-rpc", + "lapdev-rpc", + "lapdev-tunnel", + "reqwest", + "serde", + "serde_yaml", + "tarpc", + "tokio", + "tokio-tungstenite", + "tokio-util", + "tracing", + "tracing-journald", + "tracing-subscriber", + "uuid", +] + +[[package]] +name = "lapdev-kube-rpc" +version = "0.2.0" dependencies = [ + "chrono", + "httparse", + "k8s-openapi", + "lapdev-common", + "serde", "serde_json", + "tarpc", + "tokio", + "tracing", + "uuid", ] [[package]] -name = "lapdev-kube" -version = "0.1.0" +name = "lapdev-kube-sidecar-proxy" +version = "0.2.0" dependencies = [ "anyhow", - "base64 0.22.1", + "axum", + "bytes", + "chrono", + "clap", + "futures", + "futures-util", + "h2", + "http", + "http-body-util", + "httparse", + "hyper", + "hyper-util", "k8s-openapi", "kube", + "lapdev-common", + "lapdev-hrpc", + "lapdev-kube-rpc", + "lapdev-rpc", + "lapdev-tunnel", + "libc", + "nix", + "serde", + "serde_json", + "serde_yaml", + "socket2 0.5.10", + "tarpc", + "thiserror 2.0.17", + "tokio", 
+ "tokio-tungstenite", + "tokio-util", + "tower", + "tower-http", + "tracing", + "tracing-subscriber", + "url", + "uuid", ] [[package]] name = "lapdev-proxy-http" -version = "0.1.0" +version = "0.2.0" dependencies = [ "anyhow", "axum", "futures", - "hyper 1.6.0", + "hyper", "lapdev-db", + "lapdev-db-entities", "tokio", - "tokio-tungstenite 0.26.1", + "tokio-tungstenite", + "tracing", ] [[package]] name = "lapdev-proxy-ssh" -version = "0.1.0" +version = "0.2.0" dependencies = [ "anyhow", "async-trait", @@ -2543,6 +3482,7 @@ dependencies = [ "lapdev-common", "lapdev-conductor", "lapdev-db", + "lapdev-db-entities", "russh", "sea-orm", "serde_json", @@ -2552,24 +3492,49 @@ dependencies = [ [[package]] name = "lapdev-rpc" -version = "0.1.0" +version = "0.2.0" dependencies = [ "anyhow", "axum", "chrono", "futures", "lapdev-common", + "pin-project", "serde", "serde_json", "tarpc", "tokio", + "tokio-util", + "tracing", + "uuid", +] + +[[package]] +name = "lapdev-tunnel" +version = "0.2.0" +dependencies = [ + "bytes", + "chrono", + "futures", + "futures-util", + "lapdev-common", + "pin-project", + "quinn", + "rand 0.9.2", + "rcgen", + "serde", + "thiserror 2.0.17", + "tokio", + "tokio-serde", + "tokio-tungstenite", + "tokio-util", "tracing", "uuid", ] [[package]] name = "lapdev-ws" -version = "0.1.0" +version = "0.2.0" dependencies = [ "anyhow", "async-compression", @@ -2582,7 +3547,7 @@ dependencies = [ "futures", "git2", "http-body-util", - "hyper 1.6.0", + "hyper", "hyper-util", "hyperlocal", "json5", @@ -2600,7 +3565,7 @@ dependencies = [ "tempfile", "tokio", "tokio-util", - "toml 0.8.11", + "toml", "tracing", "tracing-appender", "tracing-subscriber", @@ -2610,196 +3575,189 @@ dependencies = [ [[package]] name = "lazy_static" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +checksum = 
"bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" dependencies = [ - "spin 0.5.2", + "spin", ] [[package]] name = "leptos" -version = "0.6.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c115de7c6fca2164133e18328777d02c371434ace38049ac02886b5cffd22dc" +checksum = "20adc17f0584e5f605a31444179bae17c399a2d160bf19eb3da701a1f6e7cb8e" dependencies = [ + "any_spawner", "cfg-if", + "either_of", + "futures", + "getrandom 0.3.3", + "hydration_context", "leptos_config", "leptos_dom", + "leptos_hot_reload", "leptos_macro", - "leptos_reactive", "leptos_server", + "oco_ref", + "or_poisoned", + "paste", + "reactive_graph", + "rustc-hash", + "rustc_version", + "send_wrapper", + "serde", + "serde_json", + "serde_qs", "server_fn", - "tracing", + "slotmap", + "tachys", + "thiserror 2.0.17", + "throw_error", "typed-builder", "typed-builder-macro", "wasm-bindgen", + "wasm-bindgen-futures", + "wasm_split_helpers", "web-sys", ] [[package]] name = "leptos_config" -version = "0.6.5" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "055262ff3660e95ec95cadd8a05a02070d624354e08e30b99c14a81023feb2dc" +checksum = "b0fddaae8dbc1680aa59c40c8f8ebb780b9a841e503d3cc5143d346a40c6d8ab" dependencies = [ "config", "regex", "serde", - "thiserror 1.0.58", + "thiserror 2.0.17", "typed-builder", ] [[package]] name = "leptos_dom" -version = "0.6.5" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adfea7feb9c488448db466ca0673b691ec2a05efb0b2bc6626b7248ee04bb39c" +checksum = "1aa676df0da118c690d65669eb322f47f0e47f5505ce5d2119ed5b4432d3c732" dependencies = [ - "async-recursion", - "cfg-if", - "drain_filter_polyfill", - "futures", - "getrandom", - "html-escape", - "indexmap 2.1.0", - "itertools 0.12.0", "js-sys", - "leptos_reactive", - "once_cell", - "pad-adapter", - "paste", - "rustc-hash", - "serde", - "serde_json", - "server_fn", - "smallvec", - 
"tracing", + "or_poisoned", + "reactive_graph", + "send_wrapper", + "tachys", "wasm-bindgen", - "wasm-bindgen-futures", "web-sys", ] [[package]] name = "leptos_hot_reload" -version = "0.6.5" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ba8f68f7c3135975eb34ed19210272ebef2d6f422d138ff87a726b8793fe105" +checksum = "0d61ec3e1ff8aaee8c5151688550c0363f85bc37845450764c31ff7584a33f38" dependencies = [ "anyhow", "camino", - "indexmap 2.1.0", + "indexmap", "parking_lot", "proc-macro2", "quote", "rstml", "serde", - "syn 2.0.98", + "syn 2.0.104", "walkdir", ] [[package]] name = "leptos_macro" -version = "0.6.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "039c510dafb7d9997e4b8accfcced5675fabc65720caf544592df32432d6d65a" +checksum = "5a824b74d70dd505e5fe32952150325275050febb0d0bd7ac8b8efc50a69ec95" dependencies = [ "attribute-derive", "cfg-if", - "convert_case", + "convert_case 0.8.0", "html-escape", - "itertools 0.12.0", + "itertools 0.14.0", "leptos_hot_reload", "prettyplease", - "proc-macro-error", + "proc-macro-error2", "proc-macro2", "quote", "rstml", + "rustc_version", "server_fn_macro", - "syn 2.0.98", - "tracing", + "syn 2.0.104", "uuid", ] [[package]] -name = "leptos_reactive" -version = "0.6.5" +name = "leptos_router" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b30c5bc7f3496d6ba399578171cf133c50d2172f9791fe292db4da2fd0d8cec4" +checksum = "3991de30a9cce8082e8c0d729744c8f6ffd66a828629806bf1332cb624123c75" dependencies = [ - "base64 0.21.7", - "cfg-if", + "any_spawner", + "either_of", "futures", - "indexmap 2.1.0", + "gloo-net", "js-sys", - "paste", - "pin-project", - "rustc-hash", - "self_cell", - "serde", - "serde-wasm-bindgen", - "serde_json", - "slotmap", - "thiserror 1.0.58", - "tracing", + "leptos", + "leptos_router_macro", + "or_poisoned", + "reactive_graph", + "rustc_version", + "send_wrapper", + "tachys", + 
"thiserror 2.0.17", + "url", "wasm-bindgen", - "wasm-bindgen-futures", "web-sys", ] [[package]] -name = "leptos_router" -version = "0.6.5" +name = "leptos_router_macro" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "498159479603569e1d7c969cffa0817b73249e96f825c295743f8f4607a68cbe" +checksum = "571042420d79f4f5b6b0d149dc1561b03f47e08c37c8fa0dfc80c73ad67be8af" dependencies = [ - "cfg-if", - "gloo-net 0.2.6", - "itertools 0.12.0", - "js-sys", - "lazy_static", - "leptos", - "linear-map", - "once_cell", - "percent-encoding", - "send_wrapper", - "serde", - "serde_json", - "serde_qs", - "thiserror 1.0.58", - "tracing", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", + "proc-macro-error2", + "proc-macro2", + "quote", + "syn 2.0.104", ] [[package]] name = "leptos_server" -version = "0.6.5" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f06b9b860479385991fad54cbee372382aee3c1e75ca78b5da6f8bda90c153e1" +checksum = "38acbf32649a4b127c8d4ccaed8fb388e19a746430a0ea8f8160e51e28c36e2d" dependencies = [ - "inventory", - "lazy_static", - "leptos_macro", - "leptos_reactive", + "any_spawner", + "base64", + "codee", + "futures", + "hydration_context", + "or_poisoned", + "reactive_graph", + "send_wrapper", "serde", + "serde_json", "server_fn", - "thiserror 1.0.58", - "tracing", + "tachys", ] [[package]] name = "libc" -version = "0.2.162" +version = "0.2.174" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18d287de67fe55fd7e1581fe933d965a5a9477b38e949cfa9f8574ef01506398" +checksum = "1171693293099992e19cddea4e8b849964e9846f4acee11b3948bcc337be8776" [[package]] name = "libgit2-sys" -version = "0.16.2+1.7.2" +version = "0.18.2+1.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee4126d8b4ee5c9d9ea891dd875cfdc1e9d0950437179104b183d7d8a74d24e8" +checksum = "1c42fe03df2bd3c53a3a9c7317ad91d80c81cd1fb0caec8d7cc4cd2bfa10c222" 
dependencies = [ "cc", "libc", @@ -2809,17 +3767,38 @@ dependencies = [ "pkg-config", ] +[[package]] +name = "libloading" +version = "0.8.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07033963ba89ebaf1584d767badaa2e8fcec21aedea6b8c0346d487d49c28667" +dependencies = [ + "cfg-if", + "windows-targets 0.53.3", +] + [[package]] name = "libm" -version = "0.2.8" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de" + +[[package]] +name = "libredox" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" +checksum = "391290121bad3d37fbddad76d8f5d1c1c314cfc646d143d7e07a3086ddff0ce3" +dependencies = [ + "bitflags 2.9.1", + "libc", + "redox_syscall", +] [[package]] name = "libsqlite3-sys" -version = "0.27.0" +version = "0.30.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf4e226dcd58b4be396f7bd3c20da8fdee2911400705297ba7d2d7cc2c30f716" +checksum = "2e99fb7a497b1e3339bc746195567ed8d3e24945ecd636e3619d20b9de9e9149" dependencies = [ "cc", "pkg-config", @@ -2828,9 +3807,9 @@ dependencies = [ [[package]] name = "libssh2-sys" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dc8a030b787e2119a731f1951d6a773e2280c660f8ec4b0f5e1505a386e71ee" +checksum = "220e4f05ad4a218192533b300327f5150e809b54c4ec83b5a1d91833601811b9" dependencies = [ "cc", "libc", @@ -2842,9 +3821,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.12" +version = "1.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d97137b25e321a73eef1418d1d5d2eda4d77e12813f8e6dead84bc52c5870a7b" +checksum = "8b70e7a7df205e92a1a4cd9aaae7898dac0aa555503cc0a649494d0d60e7651d" dependencies = [ "cc", "libc", @@ -2857,22 +3836,40 @@ name = 
"linear-map" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfae20f6b19ad527b550c223fddc3077a547fc70cda94b9b566575423fd303ee" + +[[package]] +name = "linux-keyutils" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "761e49ec5fd8a5a463f9b84e877c373d888935b71c6be78f3767fe2ae6bed18e" dependencies = [ - "serde", - "serde_test", + "bitflags 2.9.1", + "libc", ] [[package]] name = "linux-raw-sys" -version = "0.4.12" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" + +[[package]] +name = "linux-raw-sys" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12" + +[[package]] +name = "litemap" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4cd1a83af159aa67994778be9070f0ae1bd732942279cabb14f86f986a21456" +checksum = "241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956" [[package]] name = "lock_api" -version = "0.4.9" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df" +checksum = "96936507f153605bddfcda068dd804796c84324ed2510809e5b2a624c81da765" dependencies = [ "autocfg", "scopeguard", @@ -2880,27 +3877,45 @@ dependencies = [ [[package]] name = "log" -version = "0.4.22" +version = "0.4.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" +dependencies = [ + "value-bag", +] + +[[package]] +name = "lru-slab" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" +checksum = 
"112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154" + +[[package]] +name = "lucide-leptos" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4148eece2243ffac848135656b001183f58388723c009d80e0e299e31f0dda8" +dependencies = [ + "leptos", +] [[package]] name = "manyhow" -version = "0.8.1" +version = "0.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "516b76546495d933baa165075b95c0a15e8f7ef75e53f56b19b7144d80fd52bd" +checksum = "b33efb3ca6d3b07393750d4030418d594ab1139cee518f0dc88db70fec873587" dependencies = [ "manyhow-macros", "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.104", ] [[package]] name = "manyhow-macros" -version = "0.8.1" +version = "0.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ba072c0eadade3160232e70893311f1f8903974488096e2eb8e48caba2f0cf1" +checksum = "46fce34d199b78b6e6073abf984c9cf5fd3e9330145a93ee0738a7443e371495" dependencies = [ "proc-macro-utils", "proc-macro2", @@ -2918,9 +3933,9 @@ dependencies = [ [[package]] name = "matchit" -version = "0.7.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" +checksum = "47e1ffaa40ddd1f3ed91f717a33c8c0ee23fff369e3aa8772b9605cc1d22f4c3" [[package]] name = "md-5" @@ -2940,9 +3955,18 @@ checksum = "490cc448043f947bae3cbee9c203358d62dbee0db12107a74be5c30ccfd09771" [[package]] name = "memchr" -version = "2.7.4" +version = "2.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a282da65faaf38286cf3be983213fcf1d2e2a58700e808f83f4ea9a4804bc0" + +[[package]] +name = "memoffset" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" +checksum = "488016bfae457b036d996092f6cb448677611ce4449e970ceaf42695203f218a" +dependencies = [ 
+ "autocfg", +] [[package]] name = "mime" @@ -2952,9 +3976,9 @@ checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] name = "mime_guess" -version = "2.0.4" +version = "2.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4192263c238a5f0d0c6bfd21f336a313a4ce1c450542449ca191bb657b4642ef" +checksum = "f7c44f8e672c00fe5308fa235f821cb4198414e1c77935c1ab6948d3fd78550e" dependencies = [ "mime", "unicase", @@ -2968,34 +3992,76 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.7.1" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" +checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" dependencies = [ - "adler", + "adler2", ] [[package]] name = "mio" -version = "0.8.10" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f3d0b296e374a4e6f3c7b0a1f5a51d748a0d34c85e7dc48fc3fa9a87657fe09" +checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" dependencies = [ "libc", "log", - "wasi", + "wasi 0.11.1+wasi-snapshot-preview1", "windows-sys 0.48.0", ] +[[package]] +name = "mio" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78bed444cc8a2160f01cbcf811ef18cac863ad68ae8ca62092e8db51d51c761c" +dependencies = [ + "libc", + "wasi 0.11.1+wasi-snapshot-preview1", + "windows-sys 0.59.0", +] + +[[package]] +name = "native-tls" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87de3442987e9dbec73158d5c715e7ad9072fda936bb03d19d7fa10e00520f0e" +dependencies = [ + "libc", + "log", + "openssl", + "openssl-probe", + "openssl-sys", + "schannel", + "security-framework 2.11.1", + "security-framework-sys", + "tempfile", +] + +[[package]] +name = 
"ndk-context" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "27b02d87554356db9e9a873add8782d4ea6e3e58ea071a9adb9a2e8ddb884a8b" + +[[package]] +name = "next_tuple" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60993920e071b0c9b66f14e2b32740a4e27ffc82854dcd72035887f336a09a28" + [[package]] name = "nix" -version = "0.27.1" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2eb04e9c688eff1c89d72b407f168cf79bb9e867a9d3323ed6c01519eb9cc053" +checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.9.1", "cfg-if", + "cfg_aliases", "libc", + "memoffset", ] [[package]] @@ -3008,19 +4074,13 @@ dependencies = [ "minimal-lexical", ] -[[package]] -name = "nonempty" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9e591e719385e6ebaeb5ce5d3887f7d5676fceca6411d1925ccc95745f3d6f7" - [[package]] name = "notify" version = "6.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6205bd8bb1e454ad2e27422015fb5e4f2bcc7e08fa8f27058670d208324a4d2d" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.9.1", "crossbeam-channel", "filetime", "fsevent-sys", @@ -3028,7 +4088,7 @@ dependencies = [ "kqueue", "libc", "log", - "mio", + "mio 0.8.11", "walkdir", "windows-sys 0.48.0", ] @@ -3045,14 +4105,13 @@ dependencies = [ [[package]] name = "num-bigint" -version = "0.4.3" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f" +checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" dependencies = [ - "autocfg", "num-integer", "num-traits", - "rand", + "rand 0.8.5", ] [[package]] @@ -3067,7 +4126,7 @@ dependencies = [ "num-integer", "num-iter", "num-traits", - "rand", + "rand 
0.8.5", "smallvec", "zeroize", ] @@ -3080,19 +4139,18 @@ checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" [[package]] name = "num-integer" -version = "0.1.45" +version = "0.1.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" dependencies = [ - "autocfg", "num-traits", ] [[package]] name = "num-iter" -version = "0.1.43" +version = "0.1.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d03e6c028c5dc5cac6e2dec0efda81fc887605bb3d884578bb6d6bf7514e252" +checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf" dependencies = [ "autocfg", "num-integer", @@ -3101,9 +4159,9 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.15" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" dependencies = [ "autocfg", "libm", @@ -3111,75 +4169,142 @@ dependencies = [ [[package]] name = "num_cpus" -version = "1.14.0" +version = "1.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91df4bbde75afed763b708b7eee1e8e7651e02d97f6d5dd763e89367e957b23b" +dependencies = [ + "hermit-abi 0.5.2", + "libc", +] + +[[package]] +name = "num_threads" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6058e64324c71e02bc2b150e4f3bc8286db6c83092132ffa3f6b1eab0f9def5" +checksum = "5c7398b9c8b70908f6371f47ed36737907c87c52af34c268fed0bf0ceb92ead9" dependencies = [ - "hermit-abi", "libc", ] [[package]] name = "oauth2" -version = "4.4.2" +version = "5.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c38841cdd844847e3e7c8d29cef9dcfed8877f8f56f9071f77843ecf3baf937f" +checksum = "51e219e79014df21a225b1860a479e2dcd7cbd9130f4defd4bd0e191ea31d67d" dependencies = [ - "base64 0.13.1", + "base64", "chrono", - "getrandom", - "http 0.2.9", - "rand", + "getrandom 0.2.16", + "http", + "rand 0.8.5", "reqwest", "serde", "serde_json", "serde_path_to_error", "sha2", - "thiserror 1.0.58", + "thiserror 1.0.69", "url", ] [[package]] -name = "object" -version = "0.32.2" +name = "objc2" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7c2599ce0ec54857b29ce62166b0ed9b4f6f1a70ccc9a71165b6154caca8c05" +dependencies = [ + "objc2-encode", +] + +[[package]] +name = "objc2-encode" +version = "4.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef25abbcd74fb2609453eb695bd2f860d389e457f67dc17cafc8b8cbc89d0c33" + +[[package]] +name = "objc2-foundation" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3e0adef53c21f888deb4fa59fc59f7eb17404926ee8a6f59f5df0fd7f9f3272" +dependencies = [ + "bitflags 2.9.1", + "objc2", +] + +[[package]] +name = "oco_ref" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441" +checksum = "ed0423ff9973dea4d6bd075934fdda86ebb8c05bdf9d6b0507067d4a1226371d" dependencies = [ - "memchr", + "serde", + "thiserror 2.0.17", ] [[package]] name = "once_cell" -version = "1.18.0" +version = "1.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" + +[[package]] +name = "once_cell_polyfill" +version = "1.70.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" +checksum = "a4895175b425cb1f87721b59f0f286c2092bd4af812243672510e1ac53e2e0ad" [[package]] name = 
"opaque-debug" -version = "0.3.0" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" + +[[package]] +name = "openssl" +version = "0.10.73" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8505734d46c8ab1e19a1dce3aef597ad87dcb4c37e7188231769bd6bd51cebf8" +dependencies = [ + "bitflags 2.9.1", + "cfg-if", + "foreign-types", + "libc", + "once_cell", + "openssl-macros", + "openssl-sys", +] + +[[package]] +name = "openssl-macros" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" +checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] [[package]] name = "openssl-probe" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" +checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" [[package]] name = "openssl-src" -version = "300.1.6+3.1.4" +version = "300.5.1+3.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "439fac53e092cd7442a3660c85dde4643ab3b5bd39040912388dcdabf6b88085" +checksum = "735230c832b28c000e3bc117119e6466a663ec73506bc0a9907ea4187508e42a" dependencies = [ "cc", ] [[package]] name = "openssl-sys" -version = "0.9.95" +version = "0.9.109" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40a4130519a360279579c2053038317e40eff64d13fd3f004f9e1b72b8a6aaf9" +checksum = "90096e2e47630d78b7d1c20952dc621f957103f8bc2c8359ec81290d75238571" dependencies = [ "cc", "libc", @@ -3190,47 +4315,51 @@ dependencies = [ [[package]] name = "opentelemetry" -version = "0.18.0" +version = "0.30.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "69d6c3d7288a106c0a363e4b0e8d308058d56902adefb16f4936f417ffef086e" +checksum = "aaf416e4cb72756655126f7dd7bb0af49c674f4c1b9903e80c009e0c37e552e6" dependencies = [ - "opentelemetry_api", - "opentelemetry_sdk", + "futures-core", + "futures-sink", + "js-sys", + "pin-project-lite", + "thiserror 2.0.17", + "tracing", ] [[package]] -name = "opentelemetry_api" -version = "0.18.0" +name = "opentelemetry-semantic-conventions" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c24f96e21e7acc813c7a8394ee94978929db2bcc46cf6b5014fc612bf7760c22" -dependencies = [ - "futures-channel", - "futures-util", - "indexmap 1.9.3", - "js-sys", - "once_cell", - "pin-project-lite", - "thiserror 1.0.58", -] +checksum = "83d059a296a47436748557a353c5e6c5705b9470ef6c95cfc52c21a8814ddac2" [[package]] name = "opentelemetry_sdk" -version = "0.18.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ca41c4933371b61c2a2f214bf16931499af4ec90543604ec828f7a625c09113" +checksum = "11f644aa9e5e31d11896e024305d7e3c98a88884d9f8919dbf37a9991bc47a4b" dependencies = [ - "async-trait", - "crossbeam-channel", "futures-channel", "futures-executor", "futures-util", - "once_cell", - "opentelemetry_api", + "opentelemetry", "percent-encoding", - "rand", - "thiserror 1.0.58", + "rand 0.9.2", + "thiserror 2.0.17", ] +[[package]] +name = "option-ext" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" + +[[package]] +name = "or_poisoned" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c04f5d74368e4d0dfe06c45c8627c81bd7c317d52762d118fb9b3076f6420fd" + [[package]] name = "ordered-float" version = "2.10.1" @@ -3242,29 +4371,29 @@ dependencies = [ [[package]] name = "ordered-float" -version = "3.9.2" 
+version = "4.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1e1c390732d15f1d48471625cd92d154e66db2c56645e29a9cd26f4699f72dc" +checksum = "7bb71e1b3fa6ca1c61f383464aaf2bb0e2f8e772a1f01d486832464de363b951" dependencies = [ "num-traits", ] [[package]] name = "orion" -version = "0.17.6" +version = "0.17.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7abdb10181903c8c4b016ba45d6d6d5af1a1e2a461aa4763a83b87f5df4695e5" +checksum = "21b3da83b2b4cdc74ab6a556b2e7b473da046d5aa4008c0a7a3ae96b1b4aabb4" dependencies = [ - "fiat-crypto", + "fiat-crypto 0.3.0", "subtle", "zeroize", ] [[package]] name = "ouroboros" -version = "0.17.2" +version = "0.18.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2ba07320d39dfea882faa70554b4bd342a5f273ed59ba7c1c6b4c840492c954" +checksum = "1e0f050db9c44b97a94723127e6be766ac5c340c48f2c4bb3ffa11713744be59" dependencies = [ "aliasable", "ouroboros_macro", @@ -3273,15 +4402,15 @@ dependencies = [ [[package]] name = "ouroboros_macro" -version = "0.17.2" +version = "0.18.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec4c6225c69b4ca778c0aea097321a64c421cf4577b331c61b229267edabb6f8" +checksum = "3c7028bdd3d43083f6d8d4d5187680d0d3560d54df4cc9d752005268b41e64d0" dependencies = [ - "heck", - "proc-macro-error", + "heck 0.4.1", "proc-macro2", + "proc-macro2-diagnostics", "quote", - "syn 2.0.98", + "syn 2.0.104", ] [[package]] @@ -3304,9 +4433,9 @@ dependencies = [ [[package]] name = "p384" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70786f51bcc69f6a4c0360e063a4cac5419ef7c5cd5b3c99ad70f3be5ba79209" +checksum = "fe42f1670a52a47d448f14b6a5c61dd78fce51856e68edaa38f7ae3a46b8d6b6" dependencies = [ "ecdsa", "elliptic-curve", @@ -3324,37 +4453,37 @@ dependencies = [ "ecdsa", "elliptic-curve", "primeorder", - "rand_core", + "rand_core 0.6.4", "sha2", ] 
-[[package]] -name = "pad-adapter" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56d80efc4b6721e8be2a10a5df21a30fa0b470f1539e53d8b4e6e75faf938b63" - [[package]] name = "pageant" -version = "0.0.2" +version = "0.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c6f0e349ea8dea1b50aa17c082777d30df133d89898c7568a615354772d3731" +checksum = "bdd27df01428302f915ea74737fe88170dd1bab4cbd00ff9548ca85618fcd4e4" dependencies = [ "bytes", "delegate", "futures", "log", - "rand", - "thiserror 1.0.58", + "rand 0.8.5", + "thiserror 1.0.69", "tokio", "windows", ] +[[package]] +name = "parking" +version = "2.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" + [[package]] name = "parking_lot" -version = "0.12.3" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" +checksum = "70d58bf43669b5795d1576d0641cfb6fbb2057bf629506267a92807158584a13" dependencies = [ "lock_api", "parking_lot_core", @@ -3362,26 +4491,26 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.5" +version = "0.9.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ff9f3fef3968a3ec5945535ed654cb38ff72d7495a25619e2247fb15a2ed9ba" +checksum = "bc838d2a56b5b1a6c25f55575dfc605fabb63bb2365f6c2353ef9159aa69e4a5" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.2.16", + "redox_syscall", "smallvec", - "windows-sys 0.42.0", + "windows-targets 0.52.6", ] [[package]] name = "pasetors" -version = "0.6.8" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b36d47c66f2230dd1b7143d9afb2b4891879020210eddf2ccb624e529b96dba" +checksum = "03e1ed71dcdf863d9f66d9de86de714db38aedc2fcabc1a60207d1fde603e2d5" dependencies = [ "ct-codecs", 
"ed25519-compact", - "getrandom", + "getrandom 0.3.3", "orion", "regex", "serde_json", @@ -3397,21 +4526,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "346f04948ba92c43e8469c1ee6736c7563d71012b17d40745260fe106aac2166" dependencies = [ "base64ct", - "rand_core", + "rand_core 0.6.4", "subtle", ] [[package]] name = "paste" -version = "1.0.14" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" +checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" [[package]] name = "pathdiff" -version = "0.2.1" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8835116a5c179084a830efb3adc117ab007512b535bc1a21c991d3b32a6b44dd" +checksum = "df94ce210e5bc13cb6651479fa48d14f601d9858cfe0467f43ae157023b938d3" [[package]] name = "pbkdf2" @@ -3429,7 +4558,7 @@ version = "3.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "38af38e8470ac9dee3ce1bae1af9c1671fffc44ddfd8bd1d0a3445bf349a8ef3" dependencies = [ - "base64 0.22.1", + "base64", "serde", ] @@ -3450,20 +4579,20 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.5" +version = "2.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae9cee2a55a544be8b89dc6848072af97a20f2422603c10865be2a42b580fff5" +checksum = "1db05f56d34358a8b1066f67cbb203ee3e7ed2ba674a6263a1d5ec6db2204323" dependencies = [ "memchr", - "thiserror 1.0.58", + "thiserror 2.0.17", "ucd-trie", ] [[package]] name = "pest_derive" -version = "2.7.5" +version = "2.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81d78524685f5ef2a3b3bd1cafbc9fcabb036253d9b1463e726a91cd16e2dfc2" +checksum = "bb056d9e8ea77922845ec74a1c4e8fb17e7c218cc4fc11a15c5d25e189aa40bc" dependencies = [ "pest", "pest_generator", @@ 
-3471,53 +4600,61 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.7.5" +version = "2.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68bd1206e71118b5356dae5ddc61c8b11e28b09ef6a31acbd15ea48a28e0c227" +checksum = "87e404e638f781eb3202dc82db6760c8ae8a1eeef7fb3fa8264b2ef280504966" dependencies = [ "pest", "pest_meta", "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.104", ] [[package]] name = "pest_meta" -version = "2.7.5" +version = "2.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c747191d4ad9e4a4ab9c8798f1e82a39affe7ef9648390b7e5548d18e099de6" +checksum = "edd1101f170f5903fde0914f899bb503d9ff5271d7ba76bbb70bea63690cc0d5" dependencies = [ - "once_cell", "pest", "sha2", ] +[[package]] +name = "pgvector" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc58e2d255979a31caa7cabfa7aac654af0354220719ab7a68520ae7a91e8c0b" +dependencies = [ + "serde", +] + [[package]] name = "pin-project" -version = "1.0.12" +version = "1.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad29a609b6bcd67fee905812e544992d216af9d755757c05ed2d0e15a74c6ecc" +checksum = "677f1add503faace112b9f1373e43e9e054bfdd22ff1a63c1bc485eaec6a6a8a" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.0.12" +version = "1.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" +checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", - "syn 1.0.105", + "syn 2.0.104", ] [[package]] name = "pin-project-lite" -version = "0.2.13" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" +checksum = 
"3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" [[package]] name = "pin-utils" @@ -3525,6 +4662,17 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "piper" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96c8c490f422ef9a4efd2cb5b42b76c8613d7e7dfc1caf667b8a3350a5acc066" +dependencies = [ + "atomic-waker", + "fastrand 2.3.0", + "futures-io", +] + [[package]] name = "pkcs1" version = "0.7.5" @@ -3559,15 +4707,45 @@ checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" dependencies = [ "der", "pkcs5", - "rand_core", + "rand_core 0.6.4", "spki", ] [[package]] name = "pkg-config" -version = "0.3.27" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" + +[[package]] +name = "polling" +version = "2.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b2d323e8ca7996b3e23126511a523f7e62924d93ecd5ae73b333815b0eb3dce" +dependencies = [ + "autocfg", + "bitflags 1.3.2", + "cfg-if", + "concurrent-queue", + "libc", + "log", + "pin-project-lite", + "windows-sys 0.48.0", +] + +[[package]] +name = "polling" +version = "3.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" +checksum = "8ee9b2fa7a4517d2c91ff5bc6c297a427a96749d15f98fcdbb22c05571a4d4b7" +dependencies = [ + "cfg-if", + "concurrent-queue", + "hermit-abi 0.5.2", + "pin-project-lite", + "rustix 1.0.8", + "windows-sys 0.60.2", +] [[package]] name = "poly1305" @@ -3582,9 +4760,9 @@ dependencies = [ [[package]] name = "polyval" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d52cff9d1d4dee5fe6d03729099f4a310a41179e0a10dbf542039873f2e826fb" +checksum = "9d1fe60d06143b2430aa532c94cfe9e29783047f06c0d7fd359a9a51b729fa25" dependencies = [ "cfg-if", "cpufeatures", @@ -3592,6 +4770,15 @@ dependencies = [ "universal-hash", ] +[[package]] +name = "potential_utf" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5a7c30837279ca13e7c867e9e40053bc68740f988cb07f7ca6df43cc734b585" +dependencies = [ + "zerovec", +] + [[package]] name = "powerfmt" version = "0.2.0" @@ -3600,18 +4787,21 @@ checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" [[package]] name = "ppv-lite86" -version = "0.2.17" +version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" +checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" +dependencies = [ + "zerocopy", +] [[package]] name = "prettyplease" -version = "0.2.15" +version = "0.2.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae005bd773ab59b4725093fd7df83fd7892f7d8eafb48dbd7de6e024e4215f9d" +checksum = "ff24dfcda44452b9816fff4cd4227e1bb73ff5a2f1bc1105aa92fb8565ce44d2" dependencies = [ "proc-macro2", - "syn 2.0.98", + "syn 2.0.104", ] [[package]] @@ -3625,42 +4815,40 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "3.1.0" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d37c51ca738a55da99dc0c4a34860fd675453b8b36209178c2249bb13651284" +checksum = "edce586971a4dfaa28950c6f18ed55e0406c1ab88bbce2c6f6293a7aaba73d35" dependencies = [ - "toml_edit 0.21.1", + "toml_edit", ] [[package]] -name = "proc-macro-error" -version = "1.0.4" +name = "proc-macro-error-attr2" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" 
+checksum = "96de42df36bb9bba5542fe9f1a054b8cc87e172759a1868aa05c1f3acc89dfc5" dependencies = [ - "proc-macro-error-attr", "proc-macro2", "quote", - "syn 1.0.105", - "version_check", ] [[package]] -name = "proc-macro-error-attr" -version = "1.0.4" +name = "proc-macro-error2" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" +checksum = "11ec05c52be0a07b08061f7dd003e7d7092e0472bc731b4af7bb1ef876109802" dependencies = [ + "proc-macro-error-attr2", "proc-macro2", "quote", - "version_check", + "syn 2.0.104", ] [[package]] name = "proc-macro-utils" -version = "0.8.0" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f59e109e2f795a5070e69578c4dc101068139f74616778025ae1011d4cd41a8" +checksum = "eeaf08a13de400bc215877b5bdc088f241b12eb42f0a548d3390dc1c56bb7071" dependencies = [ "proc-macro2", "quote", @@ -3669,9 +4857,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.93" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60946a68e5f9d28b0dc1c21bb8a97ee7d018a8b322fa57838ba31cc878e22d99" +checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778" dependencies = [ "unicode-ident", ] @@ -3684,32 +4872,31 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.104", "version_check", "yansi", ] [[package]] name = "procfs" -version = "0.17.0" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc5b72d8145275d844d4b5f6d4e1eef00c8cd889edb6035c21675d1bb1f45c9f" +checksum = "25485360a54d6861439d60facef26de713b1e126bf015ec8f98239467a2b82f7" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.9.1", "chrono", "flate2", - "hex", "procfs-core", - "rustix", + "rustix 1.0.8", ] [[package]] name = "procfs-core" 
-version = "0.17.0" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "239df02d8349b06fc07398a3a1697b06418223b1c7725085e801e7c0fc6a12ec" +checksum = "e6401bf7b6af22f78b563665d15a22e9aef27775b79b149a66ca022468a4e405" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.9.1", "chrono", "hex", ] @@ -3731,105 +4918,281 @@ checksum = "16b845dbfca988fa33db069c0e230574d15a3088f147a87b64c7589eb662c9ac" dependencies = [ "proc-macro2", "quote", - "syn 1.0.105", + "syn 1.0.109", +] + +[[package]] +name = "quinn" +version = "0.11.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e20a958963c291dc322d98411f541009df2ced7b5a4f2bd52337638cfccf20" +dependencies = [ + "bytes", + "cfg_aliases", + "pin-project-lite", + "quinn-proto", + "quinn-udp", + "rustc-hash", + "rustls", + "socket2 0.6.0", + "thiserror 2.0.17", + "tokio", + "tracing", + "web-time", +] + +[[package]] +name = "quinn-proto" +version = "0.11.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1906b49b0c3bc04b5fe5d86a77925ae6524a19b816ae38ce1e426255f1d8a31" +dependencies = [ + "bytes", + "getrandom 0.3.3", + "lru-slab", + "rand 0.9.2", + "ring", + "rustc-hash", + "rustls", + "rustls-pki-types", + "slab", + "thiserror 2.0.17", + "tinyvec", + "tracing", + "web-time", +] + +[[package]] +name = "quinn-udp" +version = "0.5.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "addec6a0dcad8a8d96a771f815f0eaf55f9d1805756410b39f5fa81332574cbd" +dependencies = [ + "cfg_aliases", + "libc", + "once_cell", + "socket2 0.6.0", + "tracing", + "windows-sys 0.60.2", ] [[package]] name = "quote" -version = "1.0.35" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" +checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" dependencies = [ "proc-macro2", ] [[package]] 
name = "quote-use" -version = "0.7.2" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7b5abe3fe82fdeeb93f44d66a7b444dedf2e4827defb0a8e69c437b2de2ef94" +checksum = "9619db1197b497a36178cfc736dc96b271fe918875fbf1344c436a7e93d0321e" dependencies = [ "quote", "quote-use-macros", - "syn 2.0.98", ] [[package]] name = "quote-use-macros" -version = "0.7.2" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97ea44c7e20f16017a76a245bb42188517e13d16dcb1aa18044bc406cdc3f4af" +checksum = "82ebfb7faafadc06a7ab141a6f67bcfb24cb8beb158c6fe933f2f035afa99f35" dependencies = [ - "derive-where", + "proc-macro-utils", "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.104", +] + +[[package]] +name = "r-efi" +version = "5.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" + +[[package]] +name = "radium" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" + +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha 0.3.1", + "rand_core 0.6.4", +] + +[[package]] +name = "rand" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" +dependencies = [ + "rand_chacha 0.9.0", + "rand_core 0.9.3", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core 0.6.4", +] + +[[package]] +name = "rand_chacha" +version = "0.9.0" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" +dependencies = [ + "ppv-lite86", + "rand_core 0.9.3", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom 0.2.16", +] + +[[package]] +name = "rand_core" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" +dependencies = [ + "getrandom 0.3.3", +] + +[[package]] +name = "rcgen" +version = "0.14.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5fae430c6b28f1ad601274e78b7dffa0546de0b73b4cd32f46723c0c2a16f7a5" +dependencies = [ + "pem", + "ring", + "rustls-pki-types", + "time", + "yasna", +] + +[[package]] +name = "reactive_graph" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c8fb64b85138d34e26f0b1b853f44f592eeb0b9976bfa58897fa8beda65f2ea" +dependencies = [ + "any_spawner", + "async-lock 3.4.0", + "futures", + "guardian", + "hydration_context", + "or_poisoned", + "pin-project-lite", + "rustc-hash", + "rustc_version", + "send_wrapper", + "serde", + "slotmap", + "thiserror 2.0.17", + "web-sys", ] [[package]] -name = "radium" -version = "0.7.0" +name = "reactive_stores" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" +checksum = "79983e88dfd1a2925e29a4853ab9161b234ea78dd0d44ed33a706c9cd5e35762" +dependencies = [ + "dashmap", + "guardian", + "itertools 0.14.0", + "or_poisoned", + "paste", + "reactive_graph", + "reactive_stores_macro", + "rustc-hash", + "send_wrapper", +] [[package]] -name = "rand" -version = "0.8.5" +name = 
"reactive_stores_macro" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +checksum = "c1d6c624f2babd2319a55f39637b62d38dd327f8219c6727479792fac3f670b6" dependencies = [ - "libc", - "rand_chacha", - "rand_core", + "convert_case 0.8.0", + "proc-macro-error2", + "proc-macro2", + "quote", + "syn 2.0.104", ] [[package]] -name = "rand_chacha" -version = "0.3.1" +name = "redox_syscall" +version = "0.5.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +checksum = "5407465600fb0548f1442edf71dd20683c6ed326200ace4b1ef0763521bb3b77" dependencies = [ - "ppv-lite86", - "rand_core", + "bitflags 2.9.1", ] [[package]] -name = "rand_core" -version = "0.6.4" +name = "redox_users" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +checksum = "a4e608c6638b9c18977b00b475ac1f28d14e84b27d8d42f70e0bf1e3dec127ac" dependencies = [ - "getrandom", + "getrandom 0.2.16", + "libredox", + "thiserror 2.0.17", ] [[package]] -name = "redox_syscall" -version = "0.2.16" +name = "ref-cast" +version = "1.0.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" +checksum = "f354300ae66f76f1c85c5f84693f0ce81d747e2c3f21a45fef496d89c960bf7d" dependencies = [ - "bitflags 1.3.2", + "ref-cast-impl", ] [[package]] -name = "redox_syscall" -version = "0.4.1" +name = "ref-cast-impl" +version = "1.0.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" +checksum = "b7186006dcb21920990093f30e3dea63b7d6e977bf1256be20c3563a5db070da" dependencies = [ - "bitflags 1.3.2", + "proc-macro2", + "quote", + "syn 2.0.104", ] 
[[package]] name = "regex" -version = "1.9.4" +version = "1.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12de2eff854e5fa4b1295edd650e227e9d8fb0c9e90b12e7f36d6a6811791a29" +checksum = "843bc0191f75f3e22651ae5f1e72939ab2f72a4bc30fa80a066bd66edefc24d4" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.3.7", - "regex-syntax 0.7.5", + "regex-automata 0.4.13", + "regex-syntax 0.8.5", ] [[package]] @@ -3843,13 +5206,13 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.3.7" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49530408a136e16e5b486e883fbb6ba058e8e4e8ae6621a77b048b314336e629" +checksum = "5276caf25ac86c8d810222b3dbb938e512c55c6831a10f3e6ed1c93b84041f1c" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.7.5", + "regex-syntax 0.8.5", ] [[package]] @@ -3860,60 +5223,58 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.7.5" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "rend" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2571463863a6bd50c32f94402933f03457a3fbaf697a707c5be741e459f08fd" +checksum = "71fe3824f5629716b1589be05dacd749f6aa084c87e00e016714a8cdfccc997c" dependencies = [ "bytecheck", ] [[package]] name = "reqwest" -version = "0.11.26" +version = "0.12.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78bf93c4af7a8bb7d879d51cebe797356ff10ae8516ace542b5182d9dcac10b2" +checksum = "9d0946410b9f7b082a427e4ef5c8ff541a88b357bc6c637c40db3a68ac70a36f" dependencies = [ - "base64 0.21.7", + "base64", "bytes", - "encoding_rs", "futures-core", 
"futures-util", - "h2 0.3.21", - "http 0.2.9", - "http-body 0.4.5", - "hyper 0.14.27", - "hyper-rustls 0.24.2", - "ipnet", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-rustls", + "hyper-util", "js-sys", "log", - "mime", - "once_cell", "percent-encoding", "pin-project-lite", - "rustls 0.21.8", - "rustls-pemfile 1.0.4", + "quinn", + "rustls", + "rustls-pki-types", "serde", "serde_json", "serde_urlencoded", - "sync_wrapper 0.1.2", - "system-configuration", + "sync_wrapper", "tokio", - "tokio-rustls 0.24.1", + "tokio-rustls", "tokio-util", + "tower", + "tower-http", "tower-service", "url", "wasm-bindgen", "wasm-bindgen-futures", "wasm-streams", "web-sys", - "webpki-roots", - "winreg", + "webpki-roots 1.0.2", ] [[package]] @@ -3928,26 +5289,27 @@ dependencies = [ [[package]] name = "ring" -version = "0.17.3" +version = "0.17.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9babe80d5c16becf6594aa32ad2be8fe08498e7ae60b77de8df700e67f191d7e" +checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" dependencies = [ "cc", - "getrandom", + "cfg-if", + "getrandom 0.2.16", "libc", - "spin 0.9.8", - "untrusted", - "windows-sys 0.48.0", + "untrusted 0.9.0", + "windows-sys 0.52.0", ] [[package]] name = "rkyv" -version = "0.7.42" +version = "0.7.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0200c8230b013893c0b2d6213d6ec64ed2b9be2e0e016682b7224ff82cff5c58" +checksum = "9008cd6385b9e161d8229e1f6549dd23c3d022f132a2ea37ac3a10ac4935779b" dependencies = [ "bitvec", "bytecheck", + "bytes", "hashbrown 0.12.3", "ptr_meta", "rend", @@ -3959,20 +5321,20 @@ dependencies = [ [[package]] name = "rkyv_derive" -version = "0.7.42" +version = "0.7.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2e06b915b5c230a17d7a736d1e2e63ee753c256a8614ef3f5147b13a4f5541d" +checksum = "503d1d27590a2b0a3a4ca4c94755aa2875657196ecbf401a42eff41d7de532c0" dependencies = [ 
"proc-macro2", "quote", - "syn 1.0.105", + "syn 1.0.109", ] [[package]] name = "rsa" -version = "0.9.3" +version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86ef35bf3e7fe15a53c4ab08a998e42271eab13eb0db224126bc7bc4c4bad96d" +checksum = "78928ac1ed176a5ca1d17e578a1825f3d81ca54cf41053a592584b020cfd691b" dependencies = [ "const-oid", "digest", @@ -3981,7 +5343,7 @@ dependencies = [ "num-traits", "pkcs1", "pkcs8", - "rand_core", + "rand_core 0.6.4", "sha2", "signature", "spki", @@ -3991,32 +5353,33 @@ dependencies = [ [[package]] name = "rstml" -version = "0.11.2" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe542870b8f59dd45ad11d382e5339c9a1047cde059be136a7016095bbdefa77" +checksum = "61cf4616de7499fc5164570d40ca4e1b24d231c6833a88bff0fe00725080fd56" dependencies = [ + "derive-where", "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn 2.0.98", + "syn 2.0.104", "syn_derive", - "thiserror 1.0.58", + "thiserror 2.0.17", ] [[package]] name = "russh" -version = "0.50.2" +version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "125d81ea357e708dd04bcaab60bb664e759c0bd3553995ca6cdfb9f87a9ae43d" +checksum = "9b71d6d784c75ab19c421b7c3df21b100c9643f8b2563ae345a495ee792e85e4" dependencies = [ "aes", - "aes-gcm", - "bitflags 2.6.0", + "aws-lc-rs", + "base64ct", + "bitflags 2.9.1", "block-padding", "byteorder", "bytes", "cbc", - "chacha20", "ctr", "curve25519-dalek", "data-encoding", @@ -4030,7 +5393,7 @@ dependencies = [ "flate2", "futures", "generic-array", - "getrandom", + "getrandom 0.2.16", "hex-literal", "hmac", "home", @@ -4048,9 +5411,8 @@ dependencies = [ "pkcs1", "pkcs5", "pkcs8", - "poly1305", - "rand", - "rand_core", + "rand 0.8.5", + "rand_core 0.6.4", "rsa", "russh-cryptovec", "russh-util", @@ -4061,30 +5423,30 @@ dependencies = [ "spki", "ssh-encoding", "subtle", - "thiserror 1.0.58", + "thiserror 1.0.69", "tokio", - 
"tokio-stream", "typenum", "zeroize", ] [[package]] name = "russh-cryptovec" -version = "0.50.2" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fcb7c127135848b47715b5bcb13d8a27ccd86ce1de1c15eab5982df91fe279a" +checksum = "4fb0ed583ff0f6b4aa44c7867dd7108df01b30571ee9423e250b4cc939f8c6cf" dependencies = [ "libc", "log", + "nix", "ssh-encoding", "winapi", ] [[package]] name = "russh-util" -version = "0.50.0" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c698d702527b51a82e64de98d506e9c1e83a063035d41df4f2354499ec090b79" +checksum = "668424a5dde0bcb45b55ba7de8476b93831b4aa2fa6947e145f3b053e22c60b6" dependencies = [ "chrono", "tokio", @@ -4094,108 +5456,77 @@ dependencies = [ [[package]] name = "rust_decimal" -version = "1.36.0" +version = "1.37.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b082d80e3e3cc52b2ed634388d436fe1f4de6af5786cc2de9ba9737527bdf555" +checksum = "b203a6425500a03e0919c42d3c47caca51e79f1132046626d2c8871c5092035d" dependencies = [ "arrayvec", "borsh", "bytes", "num-traits", - "rand", + "rand 0.8.5", "rkyv", "serde", "serde_json", ] -[[package]] -name = "rustc-demangle" -version = "0.1.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" - [[package]] name = "rustc-hash" -version = "1.1.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" +checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d" [[package]] name = "rustc_version" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" 
dependencies = [ "semver", ] [[package]] name = "rustix" -version = "0.38.28" +version = "0.37.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72e572a5e8ca657d7366229cdde4bd14c4eb5499a9573d4d366fe1b599daa316" +checksum = "519165d378b97752ca44bbe15047d5d3409e875f39327546b42ac81d7e18c1b6" dependencies = [ - "bitflags 2.6.0", + "bitflags 1.3.2", "errno", + "io-lifetimes", "libc", - "linux-raw-sys", - "windows-sys 0.52.0", -] - -[[package]] -name = "rustls" -version = "0.21.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "446e14c5cda4f3f30fe71863c34ec70f5ac79d6087097ad0bb433e1be5edf04c" -dependencies = [ - "log", - "ring", - "rustls-webpki 0.101.7", - "sct", + "linux-raw-sys 0.3.8", + "windows-sys 0.48.0", ] [[package]] -name = "rustls" -version = "0.22.2" +name = "rustix" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e87c9956bd9807afa1f77e0f7594af32566e830e088a5576d27c5b6f30f49d41" +checksum = "11181fbabf243db407ef8df94a6ce0b2f9a733bd8be4ad02b4eda9602296cac8" dependencies = [ - "log", - "ring", - "rustls-pki-types", - "rustls-webpki 0.102.8", - "subtle", - "zeroize", + "bitflags 2.9.1", + "errno", + "libc", + "linux-raw-sys 0.9.4", + "windows-sys 0.60.2", ] [[package]] name = "rustls" -version = "0.23.23" +version = "0.23.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47796c98c480fce5406ef69d1c76378375492c3b0a0de587be0c1d9feb12f395" +checksum = "c0ebcbd2f03de0fc1122ad9bb24b127a5a6cd51d72604a3f3c50ac459762b6cc" dependencies = [ "log", "once_cell", "ring", "rustls-pki-types", - "rustls-webpki 0.102.8", + "rustls-webpki", "subtle", "zeroize", ] -[[package]] -name = "rustls-native-certs" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5bfb394eeed242e909609f56089eecfe5fda225042e8b171791b9c95f5931e5" -dependencies = [ - "openssl-probe", - "rustls-pemfile 2.1.0", - 
"rustls-pki-types", - "schannel", - "security-framework 2.10.0", -] - [[package]] name = "rustls-native-certs" version = "0.8.1" @@ -4210,61 +5541,45 @@ dependencies = [ [[package]] name = "rustls-pemfile" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" -dependencies = [ - "base64 0.21.7", -] - -[[package]] -name = "rustls-pemfile" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c333bb734fcdedcea57de1602543590f545f127dc8b533324318fd492c5c70b" +checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" dependencies = [ - "base64 0.21.7", "rustls-pki-types", ] [[package]] name = "rustls-pki-types" -version = "1.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "917ce264624a4b4db1c364dcc35bfca9ded014d0a958cd47ad3e960e988ea51c" - -[[package]] -name = "rustls-webpki" -version = "0.101.7" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" +checksum = "229a4a4c221013e7e1f1a043678c5cc39fe5171437c88fb47151a21e6f5b5c79" dependencies = [ - "ring", - "untrusted", + "web-time", + "zeroize", ] [[package]] name = "rustls-webpki" -version = "0.102.8" +version = "0.103.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" +checksum = "2ffdfa2f5286e2247234e03f680868ac2815974dc39e00ea15adc445d0aafe52" dependencies = [ "ring", "rustls-pki-types", - "untrusted", + "untrusted 0.9.0", ] [[package]] name = "rustversion" -version = "1.0.14" +version = "1.0.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" +checksum = 
"8a0d197bd2c9dc6e53b84da9556a69ba4cdfab8619eb41a8bd1cc2027a0f6b1d" [[package]] name = "ryu" -version = "1.0.11" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4501abdff3ae82a1c1b477a17252eb69cee9e66eb915c1abaa4f44d873df9f09" +checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" [[package]] name = "salsa20" @@ -4294,10 +5609,35 @@ dependencies = [ ] [[package]] -name = "scopeguard" +name = "schemars" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9558e172d4e8533736ba97870c4b2cd63f84b382a3d6eb063da41b91cce17289" +dependencies = [ + "dyn-clone", + "ref-cast", + "schemars_derive", + "serde", + "serde_json", +] + +[[package]] +name = "schemars_derive" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" +checksum = "301858a4023d78debd2353c7426dc486001bddc91ae31a76fb1f55132f7e2633" +dependencies = [ + "proc-macro2", + "quote", + "serde_derive_internals", + "syn 2.0.104", +] + +[[package]] +name = "scopeguard" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "scrypt" @@ -4310,42 +5650,34 @@ dependencies = [ "sha2", ] -[[package]] -name = "sct" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" -dependencies = [ - "ring", - "untrusted", -] - [[package]] name = "sea-bae" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bd3534a9978d0aa7edd2808dc1f8f31c4d0ecd31ddf71d997b3c98e9f3c9114" +checksum = "f694a6ab48f14bc063cfadff30ab551d3c7e46d8f81836c51989d548f44a2a25" dependencies = [ - "heck", - "proc-macro-error", + "heck 0.4.1", + 
"proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.104", ] [[package]] name = "sea-orm" -version = "0.12.12" +version = "1.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0cbf88748872fa54192476d6d49d0775e208566a72656e267e45f6980b926c8d" +checksum = "699b1ec145a6530c8f862eed7529d8a6068392e628d81cc70182934001e9c2a3" dependencies = [ "async-stream", "async-trait", "bigdecimal", "chrono", - "futures", + "derive_more", + "futures-util", "log", "ouroboros", + "pgvector", "rust_decimal", "sea-orm-macros", "sea-query", @@ -4353,8 +5685,8 @@ dependencies = [ "serde", "serde_json", "sqlx", - "strum 0.25.0", - "thiserror 1.0.58", + "strum 0.26.3", + "thiserror 2.0.17", "time", "tracing", "url", @@ -4363,45 +5695,62 @@ dependencies = [ [[package]] name = "sea-orm-cli" -version = "0.12.12" +version = "1.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f17eb697616be2f3e4ea3b468a44cfb6848750dec522fdef4caa44bf5a02beb" +checksum = "500cd31ebb07814d4c7b73796708bfab6c13d22f8db072cdb5115f967f4d5d2c" dependencies = [ + "async-std", "chrono", "clap", "dotenvy", "glob", "regex", + "sea-orm-codegen", "sea-schema", + "sqlx", + "tokio", "tracing", "tracing-subscriber", "url", ] +[[package]] +name = "sea-orm-codegen" +version = "1.1.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1503f30d7d41c4adbd513f10cd8fb9e300ce768885ab2994c073979346070a99" +dependencies = [ + "heck 0.5.0", + "proc-macro2", + "quote", + "sea-query", + "syn 2.0.104", + "tracing", +] + [[package]] name = "sea-orm-macros" -version = "0.12.12" +version = "1.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0dbc880d47aa53c6a572e39c99402c7fad59b50766e51e0b0fc1306510b0555" +checksum = "b0c964f4b7f34f53decf381bc88f03187b9355e07f356ce65544626e781a9585" dependencies = [ - "heck", + "heck 0.5.0", "proc-macro2", "quote", "sea-bae", - "syn 2.0.98", + "syn 2.0.104", 
"unicode-ident", ] [[package]] name = "sea-orm-migration" -version = "0.12.12" +version = "1.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e42807e13d38edd4cc1f9cbb370df898d43a9a143f6161dcdb8424a6de002def" +checksum = "977e3f71486b04371026d1ecd899f49cf437f832cd11d463f8948ee02e47ed9e" dependencies = [ "async-trait", "clap", "dotenvy", - "futures", "sea-orm", "sea-orm-cli", "sea-schema", @@ -4411,15 +5760,14 @@ dependencies = [ [[package]] name = "sea-query" -version = "0.30.7" +version = "0.32.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4166a1e072292d46dc91f31617c2a1cdaf55a8be4b5c9f4bf2ba248e3ac4999b" +checksum = "64c91783d1514b99754fc6a4079081dcc2c587dadbff65c48c7f62297443536a" dependencies = [ "bigdecimal", "chrono", - "derivative", "inherent", - "ordered-float 3.9.2", + "ordered-float 4.6.0", "rust_decimal", "sea-query-derive", "serde_json", @@ -4429,9 +5777,9 @@ dependencies = [ [[package]] name = "sea-query-binder" -version = "0.5.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36bbb68df92e820e4d5aeb17b4acd5cc8b5d18b2c36a4dd6f4626aabfa7ab1b9" +checksum = "b0019f47430f7995af63deda77e238c17323359af241233ec768aba1faea7608" dependencies = [ "bigdecimal", "chrono", @@ -4445,38 +5793,41 @@ dependencies = [ [[package]] name = "sea-query-derive" -version = "0.4.1" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25a82fcb49253abcb45cdcb2adf92956060ec0928635eb21b4f7a6d8f25ab0bc" +checksum = "bae0cbad6ab996955664982739354128c58d16e126114fe88c2a493642502aab" dependencies = [ - "heck", + "darling 0.20.11", + "heck 0.4.1", "proc-macro2", "quote", - "syn 2.0.98", - "thiserror 1.0.58", + "syn 2.0.104", + "thiserror 2.0.17", ] [[package]] name = "sea-schema" -version = "0.14.2" +version = "0.16.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"30d148608012d25222442d1ebbfafd1228dbc5221baf4ec35596494e27a2394e" +checksum = "2239ff574c04858ca77485f112afea1a15e53135d3097d0c86509cef1def1338" dependencies = [ "futures", "sea-query", + "sea-query-binder", "sea-schema-derive", + "sqlx", ] [[package]] name = "sea-schema-derive" -version = "0.2.0" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6f686050f76bffc4f635cda8aea6df5548666b830b52387e8bc7de11056d11e" +checksum = "debdc8729c37fdbf88472f97fd470393089f997a909e535ff67c544d18cfccf0" dependencies = [ - "heck", + "heck 0.4.1", "proc-macro2", "quote", - "syn 1.0.105", + "syn 2.0.104", ] [[package]] @@ -4510,12 +5861,12 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.10.0" +version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "770452e37cad93e0a50d5abc3990d2bc351c36d0328f86cefec2f2fb206eaef6" +checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" dependencies = [ - "bitflags 1.3.2", - "core-foundation 0.9.3", + "bitflags 2.9.1", + "core-foundation 0.9.4", "core-foundation-sys", "libc", "security-framework-sys", @@ -4527,8 +5878,8 @@ version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "271720403f46ca04f7ba6f55d438f8bd878d6b8ca0a1046e8228c4145bcbb316" dependencies = [ - "bitflags 2.6.0", - "core-foundation 0.10.0", + "bitflags 2.9.1", + "core-foundation 0.10.1", "core-foundation-sys", "libc", "security-framework-sys", @@ -4544,17 +5895,11 @@ dependencies = [ "libc", ] -[[package]] -name = "self_cell" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e388332cd64eb80cd595a00941baf513caffae8dce9cfd0467fc9c66397dade6" - [[package]] name = "semver" -version = "1.0.14" +version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e25dfac463d778e353db5be2449d1cce89bd6fd23c9f1ea21310ce6e5a1b29c4" +checksum = 
"56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0" [[package]] name = "send_wrapper" @@ -4567,10 +5912,11 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.195" +version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "63261df402c67811e9ac6def069e4786148c4563f4b50fd4bf30aa370d626b02" +checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" dependencies = [ + "serde_core", "serde_derive", ] @@ -4585,43 +5931,53 @@ dependencies = [ ] [[package]] -name = "serde-wasm-bindgen" -version = "0.6.3" +name = "serde_core" +version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9b713f70513ae1f8d92665bbbbda5c295c2cf1da5542881ae5eefe20c9af132" +checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" dependencies = [ - "js-sys", - "serde", - "wasm-bindgen", + "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.195" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "serde_derive_internals" +version = "0.29.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46fe8f8603d81ba86327b23a2e9cdf49e1255fb94a4c5f297f6ee0547178ea2c" +checksum = "18d26a20a969b9e3fdf2fc2d9f21eda6c40e2de84c9408bb5d3b05d499aae711" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.104", ] [[package]] name = "serde_json" -version = "1.0.111" +version = "1.0.141" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "176e46fa42316f18edd598015a5166857fc835ec732f5215eac6b7bdbf0a84f4" +checksum = "30b9eff21ebe718216c6ec64e1d9ac57087aad11efc64e32002bce4a0d4c03d3" dependencies = [ "itoa", + "memchr", "ryu", "serde", ] [[package]] name = "serde_path_to_error" -version = "0.1.14" 
+version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4beec8bce849d58d06238cb50db2e1c417cfeafa4c63f692b15c82b7c80f8335" +checksum = "59fab13f937fa393d08645bf3a84bdfe86e296747b506ada67bb15f10f218b2a" dependencies = [ "itoa", "serde", @@ -4629,31 +5985,22 @@ dependencies = [ [[package]] name = "serde_qs" -version = "0.12.0" +version = "0.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0431a35568651e363364210c91983c1da5eb29404d9f0928b67d4ebcfa7d330c" +checksum = "f3faaf9e727533a19351a43cc5a8de957372163c7d35cc48c90b75cdda13c352" dependencies = [ "percent-encoding", "serde", - "thiserror 1.0.58", + "thiserror 2.0.17", ] [[package]] name = "serde_spanned" -version = "0.6.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb3622f419d1296904700073ea6cc23ad690adbd66f13ea683df73298736f0c1" -dependencies = [ - "serde", -] - -[[package]] -name = "serde_test" -version = "1.0.176" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a2f49ace1498612d14f7e0b8245519584db8299541dfe31a06374a828d620ab" +checksum = "e24345aa0fe688594e73770a5f6d1b216508b4f93484c0026d521acd30134392" dependencies = [ - "serde", + "serde_core", ] [[package]] @@ -4670,11 +6017,11 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.9.30" +version = "0.9.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1bf28c79a99f70ee1f1d83d10c875d2e70618417fda01ad1785e027579d9d38" +checksum = "a0623d197252096520c6f2a5e1171ee436e5af99a5d7caa2891e55e61950e6d9" dependencies = [ - "indexmap 2.1.0", + "indexmap", "itoa", "ryu", "serde", @@ -4683,25 +6030,29 @@ dependencies = [ [[package]] name = "server_fn" -version = "0.6.5" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97fab54d9dd2d7e9eba4efccac41d2ec3e7c6e9973d14c0486d662a32662320c" +checksum = 
"36dab1d4cbc272e15f4475d18e90a59488d1d1efe4e7db3f71b73a43d8c5f02b" dependencies = [ + "base64", "bytes", - "ciborium", + "const-str", "const_format", "dashmap", "futures", - "gloo-net 0.5.0", - "http 1.2.0", + "gloo-net", + "http", "js-sys", - "once_cell", + "pin-project-lite", + "rustc_version", + "rustversion", "send_wrapper", "serde", "serde_json", "serde_qs", "server_fn_macro_default", - "thiserror 1.0.58", + "thiserror 2.0.17", + "throw_error", "url", "wasm-bindgen", "wasm-bindgen-futures", @@ -4712,26 +6063,27 @@ dependencies = [ [[package]] name = "server_fn_macro" -version = "0.6.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3be6011b586a0665546b7ced372b0be690d9e005d3f8524795da2843274d7720" +checksum = "b381389c2307b4b83ce70516c0408d99727ab9f73645e065bb51e6be09cb2f6b" dependencies = [ "const_format", - "convert_case", + "convert_case 0.8.0", "proc-macro2", "quote", - "syn 2.0.98", + "rustc_version", + "syn 2.0.104", "xxhash-rust", ] [[package]] name = "server_fn_macro_default" -version = "0.6.5" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "752ed78ec49132d154b922cf5ab6485680cab039a75740c48ea2db621ad481da" +checksum = "63eb08f80db903d3c42f64e60ebb3875e0305be502bdc064ec0a0eab42207f00" dependencies = [ "server_fn_macro", - "syn 2.0.98", + "syn 2.0.104", ] [[package]] @@ -4747,9 +6099,9 @@ dependencies = [ [[package]] name = "sha2" -version = "0.10.8" +version = "0.10.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" +checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283" dependencies = [ "cfg-if", "cpufeatures", @@ -4758,18 +6110,24 @@ dependencies = [ [[package]] name = "sharded-slab" -version = "0.1.4" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"900fba806f70c630b0a382d0d825e17a0f19fcd059a2ade1ff237bcddf446b31" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" dependencies = [ "lazy_static", ] +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + [[package]] name = "signal-hook-registry" -version = "1.4.0" +version = "1.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0" +checksum = "9203b8055f63a2a00e2f593bb0510367fe707d7ff1e5c872de2f537b339e5410" dependencies = [ "libc", ] @@ -4781,45 +6139,44 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" dependencies = [ "digest", - "rand_core", + "rand_core 0.6.4", ] [[package]] name = "simdutf8" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f27f6278552951f1f2b8cf9da965d10969b2efdea95a6ec47987ab46edfe263a" +checksum = "e3a9fe34e3e7a50316060351f37187a3f546bce95496156754b601a5fa71b76e" [[package]] name = "slab" -version = "0.4.7" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4614a76b2a8be0058caa9dbbaf66d988527d86d003c11a94fbd335d7661edcef" -dependencies = [ - "autocfg", -] +checksum = "04dc19736151f35336d325007ac991178d504a119863a2fcb3758cdb5e52c50d" [[package]] name = "slotmap" -version = "1.0.6" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1e08e261d0e8f5c43123b7adf3e4ca1690d655377ac93a03b2c9d3e98de1342" +checksum = "dbff4acf519f630b3a3ddcfaea6c06b42174d9a44bc70c620e9ed1649d58b82a" dependencies = [ - "serde", "version_check", ] [[package]] name = "smallvec" -version = "1.13.1" +version = "1.15.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6ecd384b10a64542d77071bd64bd7b231f4ed5940fba55e98c3de13824cf3d7" +checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" +dependencies = [ + "serde", +] [[package]] name = "socket2" -version = "0.4.7" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02e2d2db9033d13a1567121ddd7a095ee144db4e1ca1b1bda3419bc0da294ebd" +checksum = "9f7916fc008ca5542385b89a3d3ce689953c143e9304a9bf8beec1de48994c0d" dependencies = [ "libc", "winapi", @@ -4827,19 +6184,23 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.5" +version = "0.5.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9" +checksum = "e22376abed350d73dd1cd119b57ffccad95b4e585a7cda43e286245ce23c0678" dependencies = [ "libc", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] -name = "spin" -version = "0.5.2" +name = "socket2" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" +checksum = "233504af464074f9d066d7b5416c5f9b894a5862a6506e306f7b816cdd6f1807" +dependencies = [ + "libc", + "windows-sys 0.59.0", +] [[package]] name = "spin" @@ -4852,30 +6213,19 @@ dependencies = [ [[package]] name = "spki" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d1e996ef02c474957d681f1b05213dfb0abab947b446a62d37770b23500184a" +checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" dependencies = [ "base64ct", "der", ] -[[package]] -name = "sqlformat" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b7b278788e7be4d0d29c0f39497a0eef3fba6bbc8e70d8bf7fde46edeaa9e85" -dependencies = [ - "itertools 0.11.0", - "nom", - 
"unicode_categories", -] - [[package]] name = "sqlx" -version = "0.7.3" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dba03c279da73694ef99763320dea58b51095dfe87d001b1d4b5fe78ba8763cf" +checksum = "1fefb893899429669dcdd979aff487bd78f4064e5e7907e4269081e0ef7d97dc" dependencies = [ "sqlx-core", "sqlx-macros", @@ -4886,75 +6236,71 @@ dependencies = [ [[package]] name = "sqlx-core" -version = "0.7.3" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d84b0a3c3739e220d94b3239fd69fb1f74bc36e16643423bd99de3b43c21bfbd" +checksum = "ee6798b1838b6a0f69c007c133b8df5866302197e404e8b6ee8ed3e3a5e68dc6" dependencies = [ - "ahash 0.8.6", - "atoi", + "async-io 1.13.0", + "async-std", + "base64", "bigdecimal", - "byteorder", "bytes", "chrono", "crc", "crossbeam-queue", - "dotenvy", "either", - "event-listener", - "futures-channel", + "event-listener 5.4.0", "futures-core", "futures-intrusive", "futures-io", "futures-util", + "hashbrown 0.15.4", "hashlink", - "hex", - "indexmap 2.1.0", + "indexmap", "log", "memchr", + "native-tls", "once_cell", - "paste", "percent-encoding", "rust_decimal", - "rustls 0.21.8", - "rustls-pemfile 1.0.4", + "rustls", "serde", "serde_json", "sha2", "smallvec", - "sqlformat", - "thiserror 1.0.58", + "thiserror 2.0.17", "time", "tokio", "tokio-stream", "tracing", "url", "uuid", - "webpki-roots", + "webpki-roots 0.26.11", ] [[package]] name = "sqlx-macros" -version = "0.7.3" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89961c00dc4d7dffb7aee214964b065072bff69e36ddb9e2c107541f75e4f2a5" +checksum = "a2d452988ccaacfbf5e0bdbc348fb91d7c8af5bee192173ac3636b5fb6e6715d" dependencies = [ "proc-macro2", "quote", "sqlx-core", "sqlx-macros-core", - "syn 1.0.105", + "syn 2.0.104", ] [[package]] name = "sqlx-macros-core" -version = "0.7.3" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "d0bd4519486723648186a08785143599760f7cc81c52334a55d6a83ea1e20841" +checksum = "19a9c1841124ac5a61741f96e1d9e2ec77424bf323962dd894bdb93f37d5219b" dependencies = [ - "atomic-write-file", + "async-std", "dotenvy", "either", - "heck", + "heck 0.5.0", "hex", "once_cell", "proc-macro2", @@ -4966,22 +6312,21 @@ dependencies = [ "sqlx-mysql", "sqlx-postgres", "sqlx-sqlite", - "syn 1.0.105", - "tempfile", + "syn 2.0.104", "tokio", "url", ] [[package]] name = "sqlx-mysql" -version = "0.7.3" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e37195395df71fd068f6e2082247891bc11e3289624bbc776a0cdfa1ca7f1ea4" +checksum = "aa003f0038df784eb8fecbbac13affe3da23b45194bd57dba231c8f48199c526" dependencies = [ "atoi", - "base64 0.21.7", + "base64", "bigdecimal", - "bitflags 2.6.0", + "bitflags 2.9.1", "byteorder", "bytes", "chrono", @@ -5003,7 +6348,7 @@ dependencies = [ "memchr", "once_cell", "percent-encoding", - "rand", + "rand 0.8.5", "rsa", "rust_decimal", "serde", @@ -5012,7 +6357,7 @@ dependencies = [ "smallvec", "sqlx-core", "stringprep", - "thiserror 1.0.58", + "thiserror 2.0.17", "time", "tracing", "uuid", @@ -5021,14 +6366,14 @@ dependencies = [ [[package]] name = "sqlx-postgres" -version = "0.7.3" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6ac0ac3b7ccd10cc96c7ab29791a7dd236bd94021f31eec7ba3d46a74aa1c24" +checksum = "db58fcd5a53cf07c184b154801ff91347e4c30d17a3562a635ff028ad5deda46" dependencies = [ "atoi", - "base64 0.21.7", + "base64", "bigdecimal", - "bitflags 2.6.0", + "bitflags 2.9.1", "byteorder", "chrono", "crc", @@ -5036,7 +6381,6 @@ dependencies = [ "etcetera", "futures-channel", "futures-core", - "futures-io", "futures-util", "hex", "hkdf", @@ -5048,16 +6392,15 @@ dependencies = [ "memchr", "num-bigint", "once_cell", - "rand", + "rand 0.8.5", "rust_decimal", "serde", "serde_json", - "sha1", "sha2", "smallvec", "sqlx-core", "stringprep", - "thiserror 
1.0.58", + "thiserror 2.0.17", "time", "tracing", "uuid", @@ -5066,9 +6409,9 @@ dependencies = [ [[package]] name = "sqlx-sqlite" -version = "0.7.3" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "210976b7d948c7ba9fced8ca835b11cbb2d677c59c79de41ac0d397e14547490" +checksum = "c2d12fe70b2c1b4401038055f90f151b78208de1f9f89a7dbfd41587a10c3eea" dependencies = [ "atoi", "chrono", @@ -5082,11 +6425,12 @@ dependencies = [ "log", "percent-encoding", "serde", + "serde_urlencoded", "sqlx-core", + "thiserror 2.0.17", "time", "tracing", "url", - "urlencoding", "uuid", ] @@ -5119,6 +6463,12 @@ dependencies = [ "sha2", ] +[[package]] +name = "stable_deref_trait" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" + [[package]] name = "static_assertions" version = "1.1.0" @@ -5127,63 +6477,56 @@ checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" [[package]] name = "stringprep" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb41d74e231a107a1b4ee36bd1214b11285b77768d2e3824aedafa988fd36ee6" +checksum = "7b4df3d392d81bd458a8a621b8bffbd2302a12ffe288a9d931670948749463b1" dependencies = [ - "finl_unicode", "unicode-bidi", "unicode-normalization", + "unicode-properties", ] [[package]] name = "strsim" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" - -[[package]] -name = "strsim" -version = "0.11.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ee073c9e4cd00e28217186dbe12796d692868f432bf2e97ee73bed0c56dfa01" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" [[package]] name = "strum" -version = "0.25.0" +version = "0.26.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "290d54ea6f91c969195bdbcd7442c8c2a2ba87da8bf60a7ee86a235d4bc1e125" +checksum = "8fec0f0aef304996cf250b31b5a10dee7980c85da9d759361292b8bca5a18f06" [[package]] name = "strum" -version = "0.26.1" +version = "0.27.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "723b93e8addf9aa965ebe2d11da6d7540fa2283fcea14b3371ff055f7ba13f5f" +checksum = "af23d6f6c1a224baef9d3f61e287d2761385a5b88fdab4eb4c6f11aeb54c4bcf" [[package]] name = "strum_macros" -version = "0.26.1" +version = "0.27.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a3417fc93d76740d974a01654a09777cb500428cc874ca9f45edfe0c4d4cd18" +checksum = "7695ce3845ea4b33927c055a39dc438a45b059f7c1b3d91d38d10355fb8cbca7" dependencies = [ - "heck", + "heck 0.5.0", "proc-macro2", "quote", - "rustversion", - "syn 2.0.98", + "syn 2.0.104", ] [[package]] name = "subtle" -version = "2.5.0" +version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" [[package]] name = "syn" -version = "1.0.105" +version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60b9b43d45702de4c839cb9b51d9f529c5dd26a4aff255b42b1ebc03e88ee908" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ "proc-macro2", "quote", @@ -5192,9 +6535,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.98" +version = "2.0.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36147f1a48ae0ec2b5b3bc5b537d267457555a10dc06f3dbc8cb11ba3006d3b1" +checksum = "17b6f705963418cdb9927482fa304bc562ece2fdd4f616084c50b7023b435a40" dependencies = [ "proc-macro2", "quote", @@ -5203,47 +6546,90 @@ dependencies = [ [[package]] name = "syn_derive" -version = 
"0.1.8" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1329189c02ff984e9736652b1631330da25eaa6bc639089ed4915d25446cbe7b" +checksum = "cdb066a04799e45f5d582e8fc6ec8e6d6896040d00898eb4e6a835196815b219" dependencies = [ - "proc-macro-error", + "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.104", ] [[package]] name = "sync_wrapper" -version = "0.1.2" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" +checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" +dependencies = [ + "futures-core", +] [[package]] -name = "sync_wrapper" -version = "1.0.2" +name = "synstructure" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" +checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] [[package]] -name = "system-configuration" -version = "0.5.1" +name = "tachys" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" +checksum = "a8112797d6fa2ce26c1633e518759eb357ff28cd5e06d3bde2cb2ec850c3d87f" dependencies = [ - "bitflags 1.3.2", - "core-foundation 0.9.3", - "system-configuration-sys", + "any_spawner", + "async-trait", + "const_str_slice_concat", + "drain_filter_polyfill", + "either_of", + "erased", + "futures", + "html-escape", + "indexmap", + "itertools 0.14.0", + "js-sys", + "linear-map", + "next_tuple", + "oco_ref", + "or_poisoned", + "parking_lot", + "paste", + "reactive_graph", + "reactive_stores", + "rustc-hash", + "rustc_version", + "send_wrapper", + "slotmap", + "throw_error", + "wasm-bindgen", + "web-sys", ] [[package]] -name = "system-configuration-sys" 
-version = "0.5.0" +name = "tailwind_fuse" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9" +checksum = "7ca71fb01735fbc6fa13e9390d7a3037dde97053c0b65c0c72c0159cd009d26b" dependencies = [ - "core-foundation-sys", - "libc", + "nom", + "tailwind_fuse_macro", +] + +[[package]] +name = "tailwind_fuse_macro" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "efa51b9ff80b5533001f8452d254a688bc7bb39c6bb77f9e0a19c1664d035888" +dependencies = [ + "darling 0.20.11", + "proc-macro2", + "quote", + "syn 2.0.104", ] [[package]] @@ -5254,9 +6640,9 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tar" -version = "0.4.40" +version = "0.4.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b16afcea1f22891c49a00c751c7b63b2233284064f11a200fc624137c51e2ddb" +checksum = "1d863878d212c87a19c1a610eb53bb01fe12951c0501cf5a0d65f724914a667a" dependencies = [ "filetime", "libc", @@ -5265,21 +6651,22 @@ dependencies = [ [[package]] name = "tarpc" -version = "0.34.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93a1870169fb9490fb3b37df7f50782986475c33cb90955f9f9b9ae659124200" +checksum = "e5cb86a4b5b941909e9896289c01b46ff0bec756b01f366b0d5490a5e8ed7871" dependencies = [ "anyhow", "fnv", "futures", "humantime", "opentelemetry", + "opentelemetry-semantic-conventions", "pin-project", - "rand", + "rand 0.8.5", "serde", "static_assertions", "tarpc-plugins", - "thiserror 1.0.58", + "thiserror 2.0.17", "tokio", "tokio-serde", "tokio-util", @@ -5289,86 +6676,97 @@ dependencies = [ [[package]] name = "tarpc-plugins" -version = "0.13.0" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb0b234b43836d3ee3bcc318eeaf38ec27f28e398cb9d700b62e9e47f2744536" +checksum = 
"26ef4401b013b1f5218ba33ea8f1eddbfcc00ec8db073ef995c192e71f08f027" dependencies = [ "proc-macro2", "quote", - "syn 1.0.105", + "syn 2.0.104", ] [[package]] name = "tempfile" -version = "3.8.1" +version = "3.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ef1adac450ad7f4b3c28589471ade84f25f731a7a0fe30d71dfa9f60fd808e5" +checksum = "e8a64e3985349f2441a1a9ef0b853f869006c3855f2cda6862a94d26ebb9d6a1" dependencies = [ - "cfg-if", - "fastrand", - "redox_syscall 0.4.1", - "rustix", - "windows-sys 0.48.0", + "fastrand 2.3.0", + "getrandom 0.3.3", + "once_cell", + "rustix 1.0.8", + "windows-sys 0.59.0", ] [[package]] name = "thiserror" -version = "1.0.58" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03468839009160513471e86a034bb2c5c0e4baae3b43f79ffc55c4a5427b3297" +checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" dependencies = [ - "thiserror-impl 1.0.58", + "thiserror-impl 1.0.69", ] [[package]] name = "thiserror" -version = "2.0.11" +version = "2.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d452f284b73e6d76dd36758a0c8684b1d5be31f92b89d07fd5822175732206fc" +checksum = "f63587ca0f12b72a0600bcba1d40081f830876000bb46dd2337a3051618f4fc8" dependencies = [ - "thiserror-impl 2.0.11", + "thiserror-impl 2.0.17", ] [[package]] name = "thiserror-impl" -version = "1.0.58" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c61f3ba182994efc43764a46c018c347bc492c79f024e705f46567b418f6d4f7" +checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.104", ] [[package]] name = "thiserror-impl" -version = "2.0.11" +version = "2.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26afc1baea8a989337eeb52b6e72a039780ce45c3edfcc9c5b9d112feeb173c2" +checksum = 
"3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.104", ] [[package]] name = "thread_local" -version = "1.1.4" +version = "1.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5516c27b78311c50bf42c071425c560ac799b11c30b31f87e3081965fe5e0180" +checksum = "f60246a4944f24f6e018aa17cdeffb7818b76356965d03b07d6a9886e8962185" dependencies = [ - "once_cell", + "cfg-if", +] + +[[package]] +name = "throw_error" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41e42a6afdde94f3e656fae18f837cb9bbe500a5ac5de325b09f3ec05b9c28e3" +dependencies = [ + "pin-project-lite", ] [[package]] name = "time" -version = "0.3.36" +version = "0.3.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" +checksum = "8a7619e19bc266e0f9c5e6686659d394bc57973859340060a69221e57dbc0c40" dependencies = [ "deranged", "itoa", + "libc", "num-conv", + "num_threads", "powerfmt", "serde", "time-core", @@ -5377,25 +6775,35 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.2" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" +checksum = "c9e9a38711f559d9e3ce1cdb06dd7c5b8ea546bc90052da6d06bb76da74bb07c" [[package]] name = "time-macros" -version = "0.2.18" +version = "0.2.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" +checksum = "3526739392ec93fd8b359c8e98514cb3e8e021beb4e5f597b00a0221f8ed8a49" dependencies = [ "num-conv", "time-core", ] +[[package]] +name = "tinystr" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d4f6d1145dcb577acf783d4e601bc1d76a13337bb54e6233add580b07344c8b" 
+dependencies = [ + "displaydoc", + "zerovec", +] + [[package]] name = "tinyvec" -version = "1.6.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" +checksum = "09b3661f17e86524eccd4371ab0429194e0d7c008abb45f7a7495b1719463c71" dependencies = [ "tinyvec_macros", ] @@ -5408,74 +6816,61 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.36.0" +version = "1.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61285f6515fa018fb2d1e46eb21223fff441ee8db5d0f1435e8ab4f5cdb80931" +checksum = "ff360e02eab121e0bc37a2d3b4d4dc622e6eda3a8e5253d5435ecf5bd4c68408" dependencies = [ - "backtrace", "bytes", "libc", - "mio", - "num_cpus", + "mio 1.0.4", "parking_lot", "pin-project-lite", "signal-hook-registry", - "socket2 0.5.5", + "socket2 0.6.0", "tokio-macros", - "windows-sys 0.48.0", + "windows-sys 0.61.2", ] [[package]] name = "tokio-macros" -version = "2.2.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" +checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", -] - -[[package]] -name = "tokio-rustls" -version = "0.24.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" -dependencies = [ - "rustls 0.21.8", - "tokio", + "syn 2.0.104", ] [[package]] -name = "tokio-rustls" -version = "0.25.0" +name = "tokio-native-tls" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f" +checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" dependencies = [ - "rustls 
0.22.2", - "rustls-pki-types", + "native-tls", "tokio", ] [[package]] name = "tokio-rustls" -version = "0.26.1" +version = "0.26.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f6d0975eaace0cf0fcadee4e4aaa5da15b5c079146f2cffb67c113be122bf37" +checksum = "1729aa945f29d91ba541258c8df89027d5792d85a8841fb65e8bf0f4ede4ef61" dependencies = [ - "rustls 0.23.23", + "rustls", "tokio", ] [[package]] name = "tokio-serde" -version = "0.8.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "911a61637386b789af998ee23f50aa30d5fd7edcec8d6d3dedae5e5815205466" +checksum = "caf600e7036b17782571dd44fa0a5cea3c82f60db5137f774a325a76a0d6852b" dependencies = [ "bincode", "bytes", - "educe", + "educe 0.5.11", "futures-core", "futures-sink", "pin-project", @@ -5485,9 +6880,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.14" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" +checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047" dependencies = [ "futures-core", "pin-project-lite", @@ -5497,112 +6892,88 @@ dependencies = [ [[package]] name = "tokio-tungstenite" -version = "0.21.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c83b561d025642014097b66e6c1bb422783339e0909e4429cde4749d1990bc38" -dependencies = [ - "futures-util", - "log", - "tokio", - "tungstenite 0.21.0", -] - -[[package]] -name = "tokio-tungstenite" -version = "0.26.1" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be4bf6fecd69fcdede0ec680aaf474cdab988f9de6bc73d3758f0160e3b7025a" +checksum = "d25a406cddcc431a75d3d9afc6a7c0f7428d4891dd973e4d54c56b46127bf857" dependencies = [ "futures-util", "log", + "native-tls", "tokio", - "tungstenite 0.26.1", + "tokio-native-tls", + "tungstenite", ] [[package]] name = 
"tokio-util" -version = "0.7.10" +version = "0.7.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15" +checksum = "2efa149fe76073d6e8fd97ef4f4eca7b67f599660115591483572e406e165594" dependencies = [ "bytes", "futures-core", + "futures-io", "futures-sink", "pin-project-lite", "slab", "tokio", - "tracing", ] [[package]] name = "toml" -version = "0.5.11" +version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234" +checksum = "f0dc8b1fb61449e27716ec0e1bdf0f6b8f3e8f6b05391e8497b8b6d7804ea6d8" dependencies = [ - "serde", + "indexmap", + "serde_core", + "serde_spanned", + "toml_datetime 0.7.3", + "toml_parser", + "toml_writer", + "winnow", ] [[package]] -name = "toml" -version = "0.8.11" +name = "toml_datetime" +version = "0.6.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af06656561d28735e9c1cd63dfd57132c8155426aa6af24f36a00a351f88c48e" -dependencies = [ - "serde", - "serde_spanned", - "toml_datetime", - "toml_edit 0.22.7", -] +checksum = "22cddaf88f4fbc13c51aebbf5f8eceb5c7c5a9da2ac40a13519eb5b0a0e8f11c" [[package]] name = "toml_datetime" -version = "0.6.5" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1" +checksum = "f2cdb639ebbc97961c51720f858597f7f24c4fc295327923af55b74c3c724533" dependencies = [ - "serde", + "serde_core", ] [[package]] name = "toml_edit" -version = "0.21.1" +version = "0.22.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" +checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" dependencies = [ - "indexmap 2.1.0", - "toml_datetime", - "winnow 0.5.40", + "indexmap", + "toml_datetime 0.6.11", + 
"winnow", ] [[package]] -name = "toml_edit" -version = "0.22.7" +name = "toml_parser" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18769cd1cec395d70860ceb4d932812a0b4d06b1a4bb336745a4d21b9496e992" +checksum = "c0cbe268d35bdb4bb5a56a2de88d0ad0eb70af5384a99d648cd4b3d04039800e" dependencies = [ - "indexmap 2.1.0", - "serde", - "serde_spanned", - "toml_datetime", - "winnow 0.6.5", + "winnow", ] [[package]] -name = "tower" -version = "0.4.13" +name = "toml_writer" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" -dependencies = [ - "futures-core", - "futures-util", - "pin-project", - "pin-project-lite", - "tokio", - "tower-layer", - "tower-service", - "tracing", -] +checksum = "df8b2b54733674ad286d16267dcfc7a71ed5c776e4ac7aa3c3e2561f7c637bf2" [[package]] name = "tower" @@ -5613,7 +6984,7 @@ dependencies = [ "futures-core", "futures-util", "pin-project-lite", - "sync_wrapper 1.0.2", + "sync_wrapper", "tokio", "tokio-util", "tower-layer", @@ -5623,42 +6994,28 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.5.2" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5" +checksum = "adc82fd73de2a9722ac5da747f12383d2bfdb93591ee6c58486e0097890f05f2" dependencies = [ - "bitflags 2.6.0", + "base64", + "bitflags 2.9.1", "bytes", + "futures-core", "futures-util", - "http 1.2.0", - "http-body 1.0.1", + "http", + "http-body", "http-body-util", "http-range-header", "httpdate", + "iri-string", "mime", "mime_guess", "percent-encoding", "pin-project-lite", "tokio", "tokio-util", - "tower-layer", - "tower-service", - "tracing", -] - -[[package]] -name = "tower-http" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"403fa3b783d4b626a8ad51d766ab03cb6d2dbfc46b1c5d4448395e6628dc9697" -dependencies = [ - "base64 0.22.1", - "bitflags 2.6.0", - "bytes", - "http 1.2.0", - "http-body 1.0.1", - "mime", - "pin-project-lite", + "tower", "tower-layer", "tower-service", "tracing", @@ -5678,9 +7035,9 @@ checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" -version = "0.1.40" +version = "0.1.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" +checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" dependencies = [ "log", "pin-project-lite", @@ -5695,32 +7052,43 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3566e8ce28cc0a3fe42519fc80e6b4c943cc4c8cef275620eb8dac2d3d4e06cf" dependencies = [ "crossbeam-channel", - "thiserror 1.0.58", + "thiserror 1.0.69", "time", "tracing-subscriber", ] [[package]] name = "tracing-attributes" -version = "0.1.27" +version = "0.1.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" +checksum = "81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.104", ] [[package]] name = "tracing-core" -version = "0.1.32" +version = "0.1.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" +checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678" dependencies = [ "once_cell", "valuable", ] +[[package]] +name = "tracing-journald" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc0b4143302cf1022dac868d521e36e8b27691f72c84b3311750d5188ebba657" +dependencies = [ + "libc", + "tracing-core", + "tracing-subscriber", +] + [[package]] name = 
"tracing-log" version = "0.2.0" @@ -5734,22 +7102,25 @@ dependencies = [ [[package]] name = "tracing-opentelemetry" -version = "0.18.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21ebb87a95ea13271332df069020513ab70bdb5637ca42d6e492dc3bbbad48de" +checksum = "ddcf5959f39507d0d04d6413119c04f33b623f4f951ebcbdddddfad2d0623a9c" dependencies = [ + "js-sys", "once_cell", "opentelemetry", + "opentelemetry_sdk", "tracing", "tracing-core", "tracing-subscriber", + "web-time", ] [[package]] name = "tracing-subscriber" -version = "0.3.18" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" +checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008" dependencies = [ "matchers", "nu-ansi-term", @@ -5765,126 +7136,104 @@ dependencies = [ [[package]] name = "try-lock" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" - -[[package]] -name = "tungstenite" -version = "0.21.0" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ef1a641ea34f399a848dea702823bbecfb4c486f911735368f1f137cb8257e1" -dependencies = [ - "byteorder", - "bytes", - "data-encoding", - "http 1.2.0", - "httparse", - "log", - "rand", - "sha1", - "thiserror 1.0.58", - "url", - "utf-8", -] +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] name = "tungstenite" -version = "0.26.1" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "413083a99c579593656008130e29255e54dcaae495be556cc26888f211648c24" +checksum = "8628dcc84e5a09eb3d8423d6cb682965dea9133204e8fb3efee74c2a0c259442" dependencies = [ - "byteorder", "bytes", "data-encoding", - "http 1.2.0", + "http", "httparse", "log", - "rand", + 
"native-tls", + "rand 0.9.2", "sha1", - "thiserror 2.0.11", + "thiserror 2.0.17", "utf-8", ] [[package]] name = "typed-builder" -version = "0.18.1" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "444d8748011b93cb168770e8092458cb0f8854f931ff82fdf6ddfbd72a9c933e" +checksum = "ce63bcaf7e9806c206f7d7b9c1f38e0dce8bb165a80af0898161058b19248534" dependencies = [ "typed-builder-macro", ] [[package]] name = "typed-builder-macro" -version = "0.18.1" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "563b3b88238ec95680aef36bdece66896eaa7ce3c0f1b4f39d38fb2435261352" +checksum = "60d8d828da2a3d759d3519cdf29a5bac49c77d039ad36d0782edadbf9cd5415b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.104", ] [[package]] name = "typenum" -version = "1.17.0" +version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" +checksum = "1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f" [[package]] name = "ucd-trie" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" +checksum = "2896d95c02a80c6d6a5d6e953d479f5ddf2dfdb6a244441010e373ac0fb88971" [[package]] name = "unicase" -version = "2.7.0" +version = "2.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7d2d4dafb69621809a81864c9c1b864479e1235c0dd4e199924b9742439ed89" -dependencies = [ - "version_check", -] +checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" [[package]] name = "unicode-bidi" -version = "0.3.13" +version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" +checksum = 
"5c1cb5db39152898a79168971543b1cb5020dff7fe43c8dc468b0885f5e29df5" [[package]] name = "unicode-ident" -version = "1.0.5" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ceab39d59e4c9499d4e5a8ee0e2735b891bb7308ac83dfb4e80cad195c9f6f3" +checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" [[package]] name = "unicode-normalization" -version = "0.1.22" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" +checksum = "5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956" dependencies = [ "tinyvec", ] [[package]] -name = "unicode-segmentation" -version = "1.10.1" +name = "unicode-properties" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1dd624098567895118886609431a7c3b8f516e41d30e0643f03d94592a147e36" +checksum = "e70f2a8b45122e719eb623c01822704c4e0907e7e426a05927e1a1cfff5b75d0" [[package]] -name = "unicode-xid" -version = "0.2.4" +name = "unicode-segmentation" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" +checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" [[package]] -name = "unicode_categories" -version = "0.1.1" +name = "unicode-xid" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39ec24b3121d976906ece63c9daad25b85969647682eee313cb5779fdd69e14e" +checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" [[package]] name = "universal-hash" @@ -5898,9 +7247,15 @@ dependencies = [ [[package]] name = "unsafe-libyaml" -version = "0.2.10" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861" + +[[package]] 
+name = "untrusted" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab4c90930b95a82d00dc9e9ac071b4991924390d46cbd0dfe566148667605e4b" +checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] name = "untrusted" @@ -5910,9 +7265,9 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "url" -version = "2.3.1" +version = "2.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d68c799ae75762b8c3fe375feb6600ef5602c883c5d21eb51c09f22b83c4643" +checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" dependencies = [ "form_urlencoded", "idna", @@ -5934,31 +7289,45 @@ checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" [[package]] name = "utf8-width" -version = "0.1.6" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86bd8d4e895da8537e5315b8254664e6b769c4ff3db18321b297a1e7004392e3" + +[[package]] +name = "utf8_iter" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5190c9442dcdaf0ddd50f37420417d219ae5261bbf5db120d0f9bab996c9cba1" +checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" [[package]] name = "utf8parse" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" +checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.6.1" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e395fcf16a7a3d8127ec99782007af141946b4795001f876d54fb0d55978560" +checksum = "3cf4199d1e5d15ddd86a694e4d0dffa9c323ce759fea589f00fef9d81cc1931d" dependencies = [ - "getrandom", + "getrandom 0.3.3", + "js-sys", "serde", + "wasm-bindgen", ] 
[[package]] name = "valuable" -version = "0.1.0" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" + +[[package]] +name = "value-bag" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" +checksum = "943ce29a8a743eb10d6082545d861b24f9d1b160b7d741e0f2cdf726bec909c5" [[package]] name = "vcpkg" @@ -5968,15 +7337,21 @@ checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" [[package]] name = "version_check" -version = "0.9.4" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" + +[[package]] +name = "waker-fn" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" +checksum = "317211a0dc0ceedd78fb2ca9a44aed3d7b9b26f81870d485c07122b4350673b7" [[package]] name = "walkdir" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d71d857dc86794ca4c280d616f7da00d2dbfd8cd788846559a6813e6aa4b54ee" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" dependencies = [ "same-file", "winapi-util", @@ -5993,53 +7368,69 @@ dependencies = [ [[package]] name = "wasi" -version = "0.11.0+wasi-snapshot-preview1" +version = "0.11.1+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" + +[[package]] +name = "wasi" +version = "0.14.2+wasi-0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3" +dependencies = [ + "wit-bindgen-rt", +] + 
+[[package]] +name = "wasite" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" +checksum = "b8dad83b4f25e74f184f64c43b150b91efe7647395b42289f38e50566d82855b" [[package]] name = "wasm-bindgen" -version = "0.2.95" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "128d1e363af62632b8eb57219c8fd7877144af57558fb2ef0368d0087bddeb2e" +checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" dependencies = [ "cfg-if", "once_cell", + "rustversion", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.95" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb6dd4d3ca0ddffd1dd1c9c04f94b868c37ff5fac97c30b97cff2d74fce3a358" +checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" dependencies = [ "bumpalo", "log", - "once_cell", "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.104", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.45" +version = "0.4.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc7ec4f8827a71586374db3e87abdb5a2bb3a15afed140221307c3ec06b1f63b" +checksum = "555d470ec0bc3bb57890405e5d4322cc9ea83cebb085523ced7be4144dac1e61" dependencies = [ "cfg-if", "js-sys", + "once_cell", "wasm-bindgen", "web-sys", ] [[package]] name = "wasm-bindgen-macro" -version = "0.2.95" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e79384be7f8f5a9dd5d7167216f022090cf1f9ec128e6e6a482a2cb5c5422c56" +checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -6047,28 +7438,31 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.95" +version = "0.2.100" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" +checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.104", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.95" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65fc09f10666a9f147042251e0dda9c18f166ff7de300607007e96bdebc1068d" +checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" +dependencies = [ + "unicode-ident", +] [[package]] name = "wasm-streams" -version = "0.4.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b65dc4c90b63b118468cf747d8bf3566c1913ef60be765b5730ead9e0a3ba129" +checksum = "15053d8d85c7eccdbefef60f06769760a563c7f0a9d6902a13d35c7800b0ad65" dependencies = [ "futures-util", "js-sys", @@ -6077,27 +7471,94 @@ dependencies = [ "web-sys", ] +[[package]] +name = "wasm_split_helpers" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c50e0e45d0d871605a21fc4ee93a5380b7bdc41b5eda22e42f0777a4ce79b65c" +dependencies = [ + "async-once-cell", + "or_poisoned", + "wasm_split_macros", +] + +[[package]] +name = "wasm_split_macros" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f8a7f0bf54b0129a337aadfe8b716d843689f69c75b2a6413a0cff2e0d00982" +dependencies = [ + "base16", + "digest", + "quote", + "sha2", + "syn 2.0.104", + "wasm-bindgen", +] + [[package]] name = "web-sys" -version = "0.3.65" +version = "0.3.77" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "web-time" +version = "1.1.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5db499c5f66323272151db0e666cd34f78617522fb0c1604d31a27c50c206a85" +checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" dependencies = [ "js-sys", "wasm-bindgen", ] +[[package]] +name = "webbrowser" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aaf4f3c0ba838e82b4e5ccc4157003fb8c324ee24c058470ffb82820becbde98" +dependencies = [ + "core-foundation 0.10.1", + "jni", + "log", + "ndk-context", + "objc2", + "objc2-foundation", + "url", + "web-sys", +] + [[package]] name = "webpki-roots" -version = "0.25.2" +version = "0.26.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14247bb57be4f377dfb94c72830b8ce8fc6beac03cf4bf7b9732eadd414123fc" +checksum = "521bc38abb08001b01866da9f51eb7c5d647a19260e00054a8c7fd5f9e57f7a9" +dependencies = [ + "webpki-roots 1.0.2", +] + +[[package]] +name = "webpki-roots" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e8983c3ab33d6fb807cfcdad2491c4ea8cbc8ed839181c7dfd9c67c83e261b2" +dependencies = [ + "rustls-pki-types", +] [[package]] name = "whoami" -version = "1.4.1" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22fc3756b8a9133049b26c7f61ab35416c130e8c09b660f5b3958b446f52cc50" +checksum = "6994d13118ab492c3c80c1f81928718159254c53c472bf9ce36f8dae4add02a7" +dependencies = [ + "redox_syscall", + "wasite", +] [[package]] name = "winapi" @@ -6117,11 +7578,11 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.6" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f29e6f9198ba0d26b4c9f07dbe6f9ed633e1f3d5b8b414090084349e46a52596" +checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" dependencies = [ - "winapi", + "windows-sys 
0.59.0", ] [[package]] @@ -6142,24 +7603,28 @@ dependencies = [ [[package]] name = "windows-core" -version = "0.51.1" +version = "0.58.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1f8cf84f35d2db49a46868f947758c7a1138116f7fac3bc844f43ade1292e64" +checksum = "6ba6d44ec8c2591c134257ce647b7ea6b20335bf6379a27dac5f1641fcf59f99" dependencies = [ - "windows-targets 0.48.5", + "windows-implement 0.58.0", + "windows-interface 0.58.0", + "windows-result 0.2.0", + "windows-strings 0.1.0", + "windows-targets 0.52.6", ] [[package]] name = "windows-core" -version = "0.58.0" +version = "0.61.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ba6d44ec8c2591c134257ce647b7ea6b20335bf6379a27dac5f1641fcf59f99" +checksum = "c0fdd3ddb90610c7638aa2b3a3ab2904fb9e5cdbecc643ddb3647212781c4ae3" dependencies = [ - "windows-implement", - "windows-interface", - "windows-result", - "windows-strings", - "windows-targets 0.52.6", + "windows-implement 0.60.0", + "windows-interface 0.59.1", + "windows-link 0.1.3", + "windows-result 0.3.4", + "windows-strings 0.4.2", ] [[package]] @@ -6170,7 +7635,18 @@ checksum = "2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.104", +] + +[[package]] +name = "windows-implement" +version = "0.60.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", ] [[package]] @@ -6181,9 +7657,32 @@ checksum = "053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.104", +] + +[[package]] +name = "windows-interface" +version = "0.59.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8" +dependencies = [ + 
"proc-macro2", + "quote", + "syn 2.0.104", ] +[[package]] +name = "windows-link" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a" + +[[package]] +name = "windows-link" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" + [[package]] name = "windows-result" version = "0.2.0" @@ -6193,29 +7692,41 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows-result" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56f42bd332cc6c8eac5af113fc0c1fd6a8fd2aa08a0119358686e5160d0586c6" +dependencies = [ + "windows-link 0.1.3", +] + [[package]] name = "windows-strings" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" dependencies = [ - "windows-result", + "windows-result 0.2.0", "windows-targets 0.52.6", ] +[[package]] +name = "windows-strings" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57" +dependencies = [ + "windows-link 0.1.3", +] + [[package]] name = "windows-sys" -version = "0.42.0" +version = "0.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" +checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" dependencies = [ - "windows_aarch64_gnullvm 0.42.2", - "windows_aarch64_msvc 0.42.2", - "windows_i686_gnu 0.42.2", - "windows_i686_msvc 0.42.2", - "windows_x86_64_gnu 0.42.2", - "windows_x86_64_gnullvm 0.42.2", - "windows_x86_64_msvc 0.42.2", + "windows-targets 0.42.2", ] [[package]] @@ -6245,6 +7756,39 @@ dependencies = [ "windows-targets 
0.52.6", ] +[[package]] +name = "windows-sys" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" +dependencies = [ + "windows-targets 0.53.3", +] + +[[package]] +name = "windows-sys" +version = "0.61.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" +dependencies = [ + "windows-link 0.2.1", +] + +[[package]] +name = "windows-targets" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" +dependencies = [ + "windows_aarch64_gnullvm 0.42.2", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", +] + [[package]] name = "windows-targets" version = "0.48.5" @@ -6269,13 +7813,30 @@ dependencies = [ "windows_aarch64_gnullvm 0.52.6", "windows_aarch64_msvc 0.52.6", "windows_i686_gnu 0.52.6", - "windows_i686_gnullvm", + "windows_i686_gnullvm 0.52.6", "windows_i686_msvc 0.52.6", "windows_x86_64_gnu 0.52.6", "windows_x86_64_gnullvm 0.52.6", "windows_x86_64_msvc 0.52.6", ] +[[package]] +name = "windows-targets" +version = "0.53.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5fe6031c4041849d7c496a8ded650796e7b6ecc19df1a431c1a363342e5dc91" +dependencies = [ + "windows-link 0.1.3", + "windows_aarch64_gnullvm 0.53.0", + "windows_aarch64_msvc 0.53.0", + "windows_i686_gnu 0.53.0", + "windows_i686_gnullvm 0.53.0", + "windows_i686_msvc 0.53.0", + "windows_x86_64_gnu 0.53.0", + "windows_x86_64_gnullvm 0.53.0", + "windows_x86_64_msvc 0.53.0", +] + [[package]] name = "windows_aarch64_gnullvm" version = "0.42.2" @@ -6294,6 +7855,12 @@ version = "0.52.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764" + [[package]] name = "windows_aarch64_msvc" version = "0.42.2" @@ -6312,6 +7879,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" +[[package]] +name = "windows_aarch64_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c" + [[package]] name = "windows_i686_gnu" version = "0.42.2" @@ -6330,12 +7903,24 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" +[[package]] +name = "windows_i686_gnu" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3" + [[package]] name = "windows_i686_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" +[[package]] +name = "windows_i686_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11" + [[package]] name = "windows_i686_msvc" version = "0.42.2" @@ -6354,6 +7939,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" +[[package]] +name = "windows_i686_msvc" +version = "0.53.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d" + [[package]] name = "windows_x86_64_gnu" version = "0.42.2" @@ -6372,6 +7963,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" +[[package]] +name = "windows_x86_64_gnu" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba" + [[package]] name = "windows_x86_64_gnullvm" version = "0.42.2" @@ -6390,6 +7987,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57" + [[package]] name = "windows_x86_64_msvc" version = "0.42.2" @@ -6409,33 +8012,35 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] -name = "winnow" -version = "0.5.40" +name = "windows_x86_64_msvc" +version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f593a95398737aeed53e489c785df13f3618e41dbcd6718c6addbf1395aa6876" -dependencies = [ - "memchr", -] +checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" [[package]] name = "winnow" -version = "0.6.5" +version = "0.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dffa400e67ed5a4dd237983829e66475f0a4a26938c4b04c21baede6262215b8" +checksum = "21a0236b59786fed61e2a80582dd500fe61f18b5dca67a4a067d0bc9039339cf" dependencies = [ "memchr", ] [[package]] -name = "winreg" -version = 
"0.50.0" +name = "wit-bindgen-rt" +version = "0.39.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" +checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" dependencies = [ - "cfg-if", - "windows-sys 0.48.0", + "bitflags 2.9.1", ] +[[package]] +name = "writeable" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea2f10b9bb0928dfb1b42b65e1f9e36f7f54dbdf08457afefb38afcdec4fa2bb" + [[package]] name = "wyz" version = "0.5.1" @@ -6447,76 +8052,188 @@ dependencies = [ [[package]] name = "xattr" -version = "1.1.3" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7dae5072fe1f8db8f8d29059189ac175196e410e40ba42d5d4684ae2f750995" +checksum = "af3a19837351dc82ba89f8a125e22a3c475f05aba604acc023d62b2739ae2909" dependencies = [ "libc", - "linux-raw-sys", - "rustix", + "rustix 1.0.8", ] [[package]] name = "xxhash-rust" -version = "0.8.7" +version = "0.8.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9828b178da53440fa9c766a3d2f73f7cf5d0ac1fe3980c1e5018d899fd19e07b" +checksum = "fdd20c5420375476fbd4394763288da7eb0cc0b8c11deed431a91562af7335d3" [[package]] name = "yansi" -version = "1.0.0-rc.1" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" + +[[package]] +name = "yasna" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e17bb3549cc1321ae1296b9cdc2698e2b6cb1992adfa19a8c72e5b7a738f44cd" +dependencies = [ + "time", +] + +[[package]] +name = "yoke" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f41bb01b8226ef4bfd589436a297c53d118f65921786300e427be8d487695cc" +dependencies = [ + "serde", + "stable_deref_trait", + "yoke-derive", 
+ "zerofrom", +] + +[[package]] +name = "yoke-derive" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1367295b8f788d371ce2dbc842c7b709c73ee1364d30351dd300ec2203b12377" +checksum = "38da3c9736e16c5d3c8c597a9aaa5d1fa565d0532ae05e27c24aa62fb32c0ab6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", + "synstructure", +] + +[[package]] +name = "yup-oauth2" +version = "12.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4964039ac787bbd306fba65f6a8963b7974ae99515087e506a862674abae6a30" +dependencies = [ + "async-trait", + "base64", + "http", + "http-body-util", + "hyper", + "hyper-rustls", + "hyper-util", + "log", + "percent-encoding", + "rustls", + "rustls-pemfile", + "seahash", + "serde", + "serde_json", + "thiserror 2.0.17", + "time", + "tokio", + "url", +] [[package]] name = "zerocopy" -version = "0.7.25" +version = "0.8.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cd369a67c0edfef15010f980c3cbe45d7f651deac2cd67ce097cd801de16557" +checksum = "1039dd0d3c310cf05de012d8a39ff557cb0d23087fd44cad61df08fc31907a2f" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.25" +version = "0.8.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ecf5b4cc5364572d7f4c329661bcc82724222973f2cab6f050a4e5c22f75181" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "zerofrom" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = "zerofrom-derive" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2f140bda219a26ccc0cdb03dba58af72590c53b22642577d88a927bc5c87d6b" +checksum = 
"d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.104", + "synstructure", ] [[package]] name = "zeroize" -version = "1.7.0" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" + +[[package]] +name = "zerotrie" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "36f0bbd478583f79edad978b407914f61b2972f5af6fa089686016be8f9af595" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", +] + +[[package]] +name = "zerovec" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a05eb080e015ba39cc9e23bbe5e7fb04d5fb040350f99f34e338d5fdd294428" +dependencies = [ + "yoke", + "zerofrom", + "zerovec-derive", +] + +[[package]] +name = "zerovec-derive" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" +checksum = "5b96237efa0c878c64bd89c436f661be4e46b2f3eff1ebb976f7ef2321d2f58f" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] [[package]] name = "zstd" -version = "0.13.0" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bffb3309596d527cfcba7dfc6ed6052f1d39dfbd7c867aa2e865e4a449c10110" +checksum = "e91ee311a569c327171651566e07972200e76fcfe2242a4fa446149a3881c08a" dependencies = [ "zstd-safe", ] [[package]] name = "zstd-safe" -version = "7.0.0" +version = "7.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43747c7422e2924c11144d5229878b98180ef8b06cca4ab5af37afc8a8d8ea3e" +checksum = "8f49c4d5f0abb602a93fb8736af2a4f4dd9512e36f7f570d66e65ff867ed3b9d" dependencies = [ "zstd-sys", ] [[package]] name = "zstd-sys" -version = "2.0.9+zstd.1.5.5" +version = "2.0.15+zstd.1.5.7" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e16efa8a874a0481a574084d34cc26fdb3b99627480f785888deb6386506656" +checksum = "eb81183ddd97d0c74cedf1d50d85c8d08c1b8b68ee863bdee9e706eedba1a237" dependencies = [ "cc", "pkg-config", diff --git a/Cargo.toml b/Cargo.toml index 210396c..bd6df1d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,5 +1,5 @@ [workspace.package] -version = "0.1.0" +version = "0.2.0" edition = "2021" authors = ["Lapdev "] license = "AGPL-3.0" @@ -15,15 +15,15 @@ description.workspace = true [[bin]] name = "lapdev-ws" -path = "lapdev-ws/src/main.rs" +path = "crates/ws/src/main.rs" [[bin]] name = "lapdev-guest-agent" -path = "lapdev-guest-agent/src/main.rs" +path = "crates/guest-agent/src/main.rs" [[bin]] -name = "lapdev-db" -path = "lapdev-db/src/main.rs" +name = "lapdev-kube-manager" +path = "crates/kube-manager/src/main.rs" [dependencies] tokio.workspace = true @@ -32,111 +32,189 @@ anyhow.workspace = true tracing.workspace = true tracing-appender.workspace = true tracing-subscriber.workspace = true -sea-orm-migration.workspace = true +tracing-journald.workspace = true +sea-orm-cli.workspace = true lapdev-db.workspace = true lapdev-ws.workspace = true +lapdev-cli.workspace = true lapdev-guest-agent.workspace = true lapdev-kube.workspace = true +lapdev-kube-manager.workspace = true +dotenvy.workspace = true [workspace] members = [ - "lapdev-api", - "lapdev-conductor", - "lapdev-ws", - "lapdev-rpc", - "lapdev-kube", - "lapdev-dashboard", - "lapdev-db", - "lapdev-common", - "lapdev-enterprise", - "lapdev-proxy-ssh", - "lapdev-proxy-http", - "lapdev-guest-agent", + "crates/api", + "crates/api-hrpc", + "crates/cli", + "crates/conductor", + "crates/ws", + "crates/rpc", + "crates/kube", + "crates/kube-rpc", + "crates/kube-manager", + "crates/kube-sidecar-proxy", + "crates/dashboard", + "crates/db", + "crates/db/migration", + "crates/db/entities", + "crates/common", + "crates/enterprise", + "crates/proxy-ssh", + "crates/proxy-http", 
+ "crates/guest-agent", + "crates/hrpc", + "crates/hrpc-proc-macro", + "crates/devbox-rpc", + "crates/tunnel", ] [workspace.dependencies] -rust_decimal = "1.36.0" -toml = "0.8.11" +rust_decimal = "1.37.1" +toml = "0.9.8" +dotenvy = "0.15.7" clap = { version = "4.5.2", features = ["derive"] } tempfile = "3.8.1" zstd = "0.13.0" tar = "0.4.40" -git2 = { version = "0.18.2", features = ["vendored-libgit2", "vendored-openssl"] } +regex = "1.11.1" +git2 = { version = "0.20.2", features = [ + "vendored-libgit2", + "vendored-openssl", +] } anyhow = "1.0.66" -thiserror = "1.0.58" +thiserror = "2.0.17" async-trait = "0.1.74" -serde = "1.0.190" -pasetors = "0.6.8" -futures = "0.3" -futures-util = "0.3" +serde = { version = "1.0.219", features = ["derive"] } +pasetors = "0.7.7" +pin-project = "1.1.5" +futures = "0.3.31" +futures-util = "0.3.31" serde_yaml = "0.9.30" -serde_json = "1.0.111" +serde_json = "1.0.140" json5 = "0.4.1" -itertools = "0.12.0" +itertools = "0.14.0" base64 = "0.22.0" -oauth2 = "4.2.3" +hex = "0.4" +oauth2 = "5.0.0" include_dir = "0.7.3" -reqwest = {version = "0.11.26", default-features = false, features = ["rustls", "stream", "json"] } -async-compression = { version = "0.4.6", features = ["tokio", "zstd"] } -docker-compose-types = "0.7.0" +reqwest = { version = "0.12.24", default-features = false, features = [ + "rustls-tls", + "stream", + "json", +] } +async-compression = { version = "0.4.33", features = ["tokio", "zstd"] } +docker-compose-types = "0.22.0" tracing = "0.1.40" tracing-appender = "0.2.3" tracing-subscriber = { version = "0.3", features = ["env-filter"] } -chrono = { version = "0.4.31", features = ["serde"] } -tower = { version = "0.4", features = ["util"] } -tower-http = { version = "0.5.2", features = ["fs", "trace"] } -tokio-tungstenite = "0.26.1" +tracing-journald = "0.3.0" +chrono = { version = "0.4.40", features = ["serde"] } +tower = { version = "0.5.2", features = ["util"] } +tower-http = { version = "0.6.6", features = ["fs", 
"trace"] } +tokio-tungstenite = { version = "0.28.0", features = ["native-tls-vendored"] } +tokio-serde = "0.9.0" data-encoding = "2.4.0" -sha2 = "0.10.8" -rand = "0.8.5" -strum = "0.26.1" -strum_macros = "0.26.1" -procfs = "0.17.0" +secrecy = "0.10.3" +sha2 = "0.10.9" +rand = "0.9.2" +strum = "0.27.1" +strum_macros = "0.27.1" +procfs = "0.18.0" +quinn = { version = "0.11.9", default-features = false, features = [ + "rustls", + "runtime-tokio", +] } +rcgen = "0.14.5" base16ct = { version = "0.2.0", features = ["alloc"] } -axum = { version = "0.7.4", features = ["ws"] } -axum-extra = { version = "0.9.2", features = ["typed-header"] } -axum-client-ip = "0.5.0" -bytes = "1.5.0" -hyper = { version = "1.2.0", features = ["server", "client", "http1", "http2"] } -hyper-util = { version = "0.1.2", features = ["tokio", "client-legacy", "server", "server-auto"] } -http-body-util = "0.1.1" -tarpc = { version = "0.34.0", features = ["full"] } -rustls-webpki = "0.102.2" -rustls-pemfile = "2.1.0" -tokio-rustls = "0.25.0" -tokio-util = "0.7.10" -tokio = { version = "1.36.0", features = ["full"] } -sqlx = { version = "0.7.3", default-features = false, features = ["sqlx-postgres"] } -sea-orm = { version = "0.12.12", features = [ "sqlx-postgres", "sqlx-sqlite", "runtime-tokio-rustls", "macros" ] } -sea-orm-migration = { version = "0.12.6", features = [ "sqlx-postgres", "runtime-tokio-rustls" ] } +axum = { version = "0.8.6", features = ["ws", "macros"] } +axum-extra = { version = "0.12.1", features = ["typed-header"] } +axum-client-ip = "1.1.3" +bytes = "1.10.1" +hyper = { version = "1.7.0", features = ["server", "client", "http1", "http2"] } +hyper-util = { version = "0.1.17", features = [ + "tokio", + "client-legacy", + "server", + "server-auto", +] } +http-body-util = "0.1.3" +kube = { version = "2.0.1", features = ["runtime"] } +k8s-openapi = { version = "0.26.0", features = ["latest"] } +tarpc = { version = "0.37.0", features = ["full"] } +rustls-webpki = "0.103.8" 
+rustls-pemfile = "2.2.0" +tokio-rustls = { version = "0.26.4", default-features = false, features = [ + "ring", + "logging", + "tls12", +] } +tokio-util = { version = "0.7.17", features = ["codec", "io", "compat"] } +tokio = { version = "1.48.0", features = ["full"] } +sqlx = { version = "0.8.6", default-features = false, features = [ + "sqlx-postgres", +] } +sea-orm = { version = "1.1.17", features = [ + "sqlx-postgres", + "sqlx-sqlite", + "runtime-tokio-rustls", + "macros", +] } +sea-orm-migration = { version = "1.1.17", features = [ + "sqlx-postgres", + "runtime-tokio-rustls", +] } +sea-orm-cli = { version = "1.1.17", features = [ + "sqlx-postgres", + "runtime-tokio-rustls", +] } uuid = { version = "1.6.1", features = ["v4", "serde"] } -russh = {version = "0.50.2" } -lapdev-api = { path = "./lapdev-api" } -lapdev-ws = { path = "./lapdev-ws" } -lapdev-db = { path = "./lapdev-db" } -lapdev-rpc = { path = "./lapdev-rpc" } -lapdev-conductor = { path = "./lapdev-conductor" } -lapdev-proxy-ssh = { path = "./lapdev-proxy-ssh" } -lapdev-proxy-http = { path = "./lapdev-proxy-http" } -lapdev-common = { path = "./lapdev-common" } -lapdev-enterprise = { path = "./lapdev-enterprise" } -lapdev-guest-agent = { path = "./lapdev-guest-agent" } -lapdev-kube = { path = "./lapdev-kube" } +russh = { version = "0.53.0" } +lapdev-api = { path = "./crates/api" } +lapdev-api-hrpc = { path = "./crates/api-hrpc" } +lapdev-cli = { path = "./crates/cli" } +lapdev-ws = { path = "./crates/ws" } +lapdev-db = { path = "./crates/db" } +lapdev-db-entities = { path = "./crates/db/entities" } +lapdev-db-migration = { path = "./crates/db/migration" } +lapdev-rpc = { path = "./crates/rpc" } +lapdev-conductor = { path = "./crates/conductor" } +lapdev-proxy-ssh = { path = "./crates/proxy-ssh" } +lapdev-proxy-http = { path = "./crates/proxy-http" } +lapdev-common = { path = "./crates/common" } +lapdev-enterprise = { path = "./crates/enterprise" } +lapdev-guest-agent = { path = "./crates/guest-agent" } 
+lapdev-kube = { path = "./crates/kube" } +lapdev-kube-rpc = { path = "./crates/kube-rpc" } +lapdev-kube-manager = { path = "./crates/kube-manager" } +lapdev-kube-sidecar-proxy = { path = "./crates/kube-sidecar-proxy" } +lapdev-devbox-rpc = { path = "./crates/devbox-rpc" } +lapdev-hrpc = { path = "./crates/hrpc" } +lapdev-hrpc-proc-macro = { path = "./crates/hrpc-proc-macro" } +lapdev-tunnel = { path = "./crates/tunnel" } [package.metadata.deb] depends = "$auto, passwd" maintainer-scripts = "pkg/deb/" systemd-units = { unit-name = "lapdev", enable = false } assets = [ - ["target/release/lapdev", "/usr/bin/lapdev", "755"], - ["pkg/common/lapdev.service.preset", "/lib/systemd/system-preset/50-lapdev.preset", "644"], + [ + "target/release/lapdev", + "/usr/bin/lapdev", + "755", + ], + [ + "pkg/common/lapdev.service.preset", + "/lib/systemd/system-preset/50-lapdev.preset", + "644", + ], ] [package.metadata.generate-rpm] post_install_script = "pkg/rpm/lapdev.postinst" post_uninstall_script = "pkg/rpm/lapdev.postuninst" assets = [ - { source = "target/release/lapdev", dest = "/usr/bin/lapdev", mode = "755" }, - { source = "pkg/deb/lapdev.service", dest = "/lib/systemd/system/lapdev.service", mode = "644" }, - { source = "pkg/common/lapdev.service.preset", dest = "/lib/systemd/system-preset/50-lapdev.preset", mode = "644" }, + { source = "target/release/lapdev", dest = "/usr/bin/lapdev", mode = "755" }, + { source = "pkg/deb/lapdev.service", dest = "/lib/systemd/system/lapdev.service", mode = "644" }, + { source = "pkg/common/lapdev.service.preset", dest = "/lib/systemd/system-preset/50-lapdev.preset", mode = "644" }, ] diff --git a/README.md b/README.md index d28f14b..d18e5a2 100644 --- a/README.md +++ b/README.md @@ -3,7 +3,7 @@ # Lapdev - **Self-hosted remote development enviroment management with ease** + **Seamless dev environments for your Kubernetes apps**
@@ -13,44 +13,48 @@ - - Lapdev Docs + + Lapdev Docs

-**Lapdev** is a self hosted application that spins up remote development environments on your own servers or clouds. It scales from a single machine in the corner to a global fleet of servers. It uses [Devcontainer open specification](https://containers.dev/) for defining your development environment as code. If you’re interested in a deep dive into how Lapdev works, you can read about its [architecture](https://docs.lap.dev/administration/architecture) here. - -## Cloud - -If you don't want to self host, we also have cloud offering of Lapdev using high end Gaming CPU: https://ws.lap.dev/ - -[Read More](https://lap.dev/blog/a-performance-review-of-cloud-dev-envs/) to know how a Gaming CPU can boost the performance. +**Lapdev** gives your team production-accurate development environments that run directly in your Kubernetes cluster. Install the lightweight `lapdev-kube-manager`, choose the workloads you care about, and Lapdev keeps isolated or shared environments in sync with your production manifests. Developers iterate quickly using the [Devbox CLI](https://lapdev.gitbook.io/docs/core-concepts/devbox) for local debugging while staying connected to real cluster resources.
-![](https://lap.dev/images/screenshot.png) +![](https://new.lap.dev/images/screenshot.png)
-## Features - -- **Self hosted with ease:** Lapdev is designed to be self hosted with minimum efforts for installation and maintenance. The application is designed to just work, sparing you from digging too deep into the internals for troubleshooting. +## What Lapdev Does -- **Horizontal scalability:** With a simple yet powerful [architecture](https://docs.lap.dev/administration/architecture), Lapdev can scale from a single machine to a fleet of servers, so that you can have a development environment management system that can grow with your developer teams. +- **Reads production manifests** – Lapdev builds an App Catalog straight from your production Deployments, StatefulSets, ConfigMaps, Secrets, and Services. +- **Creates tailored environments** – Launch fully isolated personal namespaces, shared team baselines, or lightweight branch environments with intelligent traffic routing. +- **Keeps everything in sync** – Monitor production for changes, notify developers, and apply updates when you are ready—no manual YAML drift. +- **Enables local-style iteration** – Devbox routes cluster traffic to your laptop, so you debug locally while remaining wired into in-cluster dependencies. +- **Publishes secure preview URLs** – Share HTTPS endpoints for any service without juggling DNS, certificates, or ingress rules. -- **Development Environment as Code:** Using the [Devcontainer open specification](https://containers.dev/), Lapdev allows you to define your development environment as code. This empowers you to standardize development environments that can be replicated across different developers, avoiding environment related issues and ensuring a consistent setup for everyone. +## Architecture at a Glance -- **Save Onboarding Time:** Onboarding developers to new projects don't need hours or days to prepare the environment on their machines. They can start to code instantly. +Lapdev is made of three components that work together: -## Planned Features +1. 
**Lapdev API Server** – SaaS control plane that handles auth, orchestration, and secure tunneling. +2. **Lapdev-Kube-Manager** – In-cluster operator that mirrors production workloads into development namespaces. +3. **Devbox CLI** – Developer tooling for traffic interception and access to cluster services from your local machine. -- **More workspace types:** Currently Lapdev only supports container based workspaces, which has its own limitations for example when you want to run a k8s cluster in your development flow. It's planned to have support for more than containers. VMs and bare metal machine support are on the roadmap. And more OS support is planned as well, e.g. when you are developing a cross platform desktop application for Windows, Linux and macOS, Lapdev can spin up development environments on all of them and you can develop and debug from the same local machine without the need to switch machines. +Dig deeper in the [Architecture docs](https://lapdev.gitbook.io/docs/core-concepts/architecture/). -## Installation +## Getting Started -You can see the installation steps [here](https://docs.lap.dev/installation/quickstart). +1. [Connect your Kubernetes cluster](https://lapdev.gitbook.io/docs/how-to-guides/connect-your-kubernetes-cluster) +2. [Create an App Catalog](https://lapdev.gitbook.io/docs/how-to-guides/create-an-app-catalog) +3. [Provision your first environment](https://lapdev.gitbook.io/docs/how-to-guides/create-lapdev-environment) +4. 
[Develop locally with Devbox](https://lapdev.gitbook.io/docs/how-to-guides/local-development-with-devbox) -## Build from source +## Resources -## Contributing +- [Traffic routing architecture](https://lapdev.gitbook.io/docs/core-concepts/architecture/traffic-routing-architecture) +- [Branch Environment Architecture](https://lapdev.gitbook.io/docs/core-concepts/architecture/branch-environment-architecture) +- [Preview URLs](https://lapdev.gitbook.io/docs/core-concepts/preview-url) +- [Lapdev Docs](https://lapdev.gitbook.io/docs/) diff --git a/crates/api-hrpc/Cargo.toml b/crates/api-hrpc/Cargo.toml new file mode 100644 index 0000000..d25cdcc --- /dev/null +++ b/crates/api-hrpc/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "lapdev-api-hrpc" +version.workspace = true +authors.workspace = true +edition.workspace = true +license.workspace = true +description.workspace = true + +[dependencies] +uuid.workspace = true +serde.workspace = true +lapdev-common.workspace = true +lapdev-hrpc.workspace = true \ No newline at end of file diff --git a/crates/api-hrpc/src/lib.rs b/crates/api-hrpc/src/lib.rs new file mode 100644 index 0000000..c48bc11 --- /dev/null +++ b/crates/api-hrpc/src/lib.rs @@ -0,0 +1,322 @@ +use lapdev_common::{ + devbox::{ + DevboxEnvironmentSelection, DevboxPortMappingOverride, DevboxSessionSummary, + DevboxSessionWhoAmI, DevboxStartWorkloadInterceptResponse, + DevboxWorkloadInterceptListResponse, + }, + kube::{ + CreateKubeClusterResponse, CreateKubeEnvironmentPreviewUrlRequest, KubeAppCatalogWorkload, + KubeAppCatalogWorkloadCreate, KubeCluster, KubeContainerInfo, KubeEnvironmentPreviewUrl, + KubeEnvironmentService, KubeEnvironmentWorkload, KubeEnvironmentWorkloadDetail, + KubeNamespace, KubeNamespaceInfo, KubeWorkload, KubeWorkloadKind, KubeWorkloadList, + PagePaginationParams, PaginatedResult, PaginationParams, + UpdateKubeEnvironmentPreviewUrlRequest, + }, +}; +use uuid::Uuid; + +pub use lapdev_common::hrpc::HrpcError; +pub use lapdev_common::kube::{ + 
KubeAppCatalog, KubeEnvironment, KubeEnvironmentDashboardSummary, KubeEnvironmentStatusCount, +}; +// For backward compatibility +pub use lapdev_common::kube::KubeAppCatalog as AppCatalog; + +#[lapdev_hrpc::service] +pub trait HrpcService { + async fn devbox_session_get_session(&self) -> Result, HrpcError>; + + async fn devbox_session_revoke_session(&self, session_id: Uuid) -> Result<(), HrpcError>; + + async fn devbox_session_get_active_environment( + &self, + ) -> Result, HrpcError>; + + async fn devbox_session_set_active_environment( + &self, + environment_id: Uuid, + ) -> Result<(), HrpcError>; + + async fn devbox_session_whoami(&self) -> Result; + + async fn devbox_intercept_list( + &self, + environment_id: Uuid, + ) -> Result; + + async fn devbox_intercept_start( + &self, + workload_id: Uuid, + port_mappings: Vec, + ) -> Result; + + async fn devbox_intercept_stop(&self, intercept_id: Uuid) -> Result<(), HrpcError>; + + async fn all_kube_clusters(&self, org_id: Uuid) -> Result, HrpcError>; + + async fn create_kube_cluster( + &self, + org_id: Uuid, + name: String, + ) -> Result; + + async fn delete_kube_cluster(&self, org_id: Uuid, cluster_id: Uuid) -> Result<(), HrpcError>; + + async fn set_cluster_deployable( + &self, + org_id: Uuid, + cluster_id: Uuid, + can_deploy_personal: bool, + can_deploy_shared: bool, + ) -> Result<(), HrpcError>; + + async fn set_cluster_personal_deployable( + &self, + org_id: Uuid, + cluster_id: Uuid, + can_deploy: bool, + ) -> Result<(), HrpcError>; + + async fn set_cluster_shared_deployable( + &self, + org_id: Uuid, + cluster_id: Uuid, + can_deploy: bool, + ) -> Result<(), HrpcError>; + + async fn get_workloads( + &self, + org_id: Uuid, + cluster_id: Uuid, + namespace: Option, + workload_kind_filter: Option, + include_system_workloads: bool, + pagination: Option, + ) -> Result; + + async fn get_workload_details( + &self, + org_id: Uuid, + cluster_id: Uuid, + name: String, + namespace: String, + ) -> Result, HrpcError>; + + async fn 
get_cluster_namespaces( + &self, + org_id: Uuid, + cluster_id: Uuid, + ) -> Result, HrpcError>; + + async fn get_cluster_info( + &self, + org_id: Uuid, + cluster_id: Uuid, + ) -> Result; + + async fn create_app_catalog( + &self, + org_id: Uuid, + cluster_id: Uuid, + name: String, + description: Option, + workloads: Vec, + ) -> Result; + + async fn all_app_catalogs( + &self, + org_id: Uuid, + search: Option, + pagination: Option, + ) -> Result, HrpcError>; + + async fn get_app_catalog( + &self, + org_id: Uuid, + catalog_id: Uuid, + ) -> Result; + + async fn get_app_catalog_workloads( + &self, + org_id: Uuid, + catalog_id: Uuid, + ) -> Result, HrpcError>; + + async fn delete_app_catalog(&self, org_id: Uuid, catalog_id: Uuid) -> Result<(), HrpcError>; + + async fn delete_app_catalog_workload( + &self, + org_id: Uuid, + workload_id: Uuid, + ) -> Result<(), HrpcError>; + + async fn update_app_catalog_workload( + &self, + org_id: Uuid, + workload_id: Uuid, + containers: Vec, + ) -> Result<(), HrpcError>; + + async fn add_workloads_to_app_catalog( + &self, + org_id: Uuid, + catalog_id: Uuid, + workloads: Vec, + ) -> Result<(), HrpcError>; + + async fn create_kube_environment( + &self, + org_id: Uuid, + app_catalog_id: Uuid, + cluster_id: Uuid, + name: String, + is_shared: bool, + ) -> Result; + + async fn create_branch_environment( + &self, + org_id: Uuid, + base_environment_id: Uuid, + name: String, + ) -> Result; + + async fn all_kube_environments( + &self, + org_id: Uuid, + search: Option, + is_shared: bool, + is_branch: bool, + pagination: Option, + ) -> Result, HrpcError>; + + async fn get_environment_dashboard_summary( + &self, + org_id: Uuid, + recent_limit: Option, + ) -> Result; + + async fn get_kube_environment( + &self, + org_id: Uuid, + environment_id: Uuid, + ) -> Result; + + async fn delete_kube_environment( + &self, + org_id: Uuid, + environment_id: Uuid, + ) -> Result<(), HrpcError>; + + async fn pause_kube_environment( + &self, + org_id: Uuid, + 
environment_id: Uuid, + ) -> Result<(), HrpcError>; + + async fn resume_kube_environment( + &self, + org_id: Uuid, + environment_id: Uuid, + ) -> Result<(), HrpcError>; + + async fn sync_environment_from_catalog( + &self, + org_id: Uuid, + environment_id: Uuid, + ) -> Result<(), HrpcError>; + + async fn rebase_branch_environment( + &self, + org_id: Uuid, + environment_id: Uuid, + ) -> Result<(), HrpcError>; + + // Kube Environment Workload operations + async fn get_environment_workloads( + &self, + org_id: Uuid, + environment_id: Uuid, + ) -> Result, HrpcError>; + + async fn get_environment_services( + &self, + org_id: Uuid, + environment_id: Uuid, + ) -> Result, HrpcError>; + + async fn get_environment_workload( + &self, + org_id: Uuid, + workload_id: Uuid, + ) -> Result, HrpcError>; + + async fn get_environment_workload_detail( + &self, + org_id: Uuid, + environment_id: Uuid, + workload_id: Uuid, + ) -> Result; + + async fn update_environment_workload( + &self, + org_id: Uuid, + environment_id: Uuid, + workload_id: Uuid, + containers: Vec, + ) -> Result; + + async fn delete_environment_workload( + &self, + org_id: Uuid, + environment_id: Uuid, + workload_id: Uuid, + ) -> Result<(), HrpcError>; + + // Kube Namespace operations + async fn create_kube_namespace( + &self, + org_id: Uuid, + name: String, + description: Option, + is_shared: bool, + ) -> Result; + + async fn all_kube_namespaces( + &self, + org_id: Uuid, + is_shared: bool, + ) -> Result, HrpcError>; + + async fn delete_kube_namespace( + &self, + org_id: Uuid, + namespace_id: Uuid, + ) -> Result<(), HrpcError>; + + // Kube Environment Preview URL operations + async fn create_environment_preview_url( + &self, + org_id: Uuid, + environment_id: Uuid, + request: CreateKubeEnvironmentPreviewUrlRequest, + ) -> Result; + + async fn get_environment_preview_urls( + &self, + org_id: Uuid, + environment_id: Uuid, + ) -> Result, HrpcError>; + + async fn update_environment_preview_url( + &self, + org_id: Uuid, + 
preview_url_id: Uuid, + request: UpdateKubeEnvironmentPreviewUrlRequest, + ) -> Result; + + async fn delete_environment_preview_url( + &self, + org_id: Uuid, + preview_url_id: Uuid, + ) -> Result<(), HrpcError>; +} diff --git a/lapdev-api/Cargo.toml b/crates/api/Cargo.toml similarity index 74% rename from lapdev-api/Cargo.toml rename to crates/api/Cargo.toml index 098460b..dfc648e 100644 --- a/lapdev-api/Cargo.toml +++ b/crates/api/Cargo.toml @@ -7,6 +7,8 @@ license.workspace = true description.workspace = true [dependencies] +regex.workspace = true +tarpc.workspace = true itertools.workspace = true rustls-webpki.workspace = true include_dir.workspace = true @@ -14,6 +16,7 @@ toml.workspace = true clap.workspace = true rustls-pemfile.workspace = true tokio-rustls.workspace = true +tokio-util.workspace = true reqwest.workspace = true oauth2.workspace = true chrono.workspace = true @@ -30,21 +33,32 @@ tower.workspace = true tower-http.workspace = true anyhow.workspace = true tokio.workspace = true +tokio-tungstenite.workspace = true +tokio-stream = { version = "0.1.15", features = ["sync"] } futures.workspace = true futures-util.workspace = true serde.workspace = true serde_json.workspace = true +serde_yaml.workspace = true tracing.workspace = true tracing-appender.workspace = true tracing-subscriber.workspace = true uuid.workspace = true +secrecy.workspace = true sqlx.workspace = true sea-orm.workspace = true lapdev-rpc.workspace = true +lapdev-kube-rpc.workspace = true +lapdev-devbox-rpc.workspace = true lapdev-common.workspace = true +k8s-openapi.workspace = true +lapdev-api-hrpc.workspace = true lapdev-db.workspace = true +lapdev-db-entities.workspace = true lapdev-enterprise.workspace = true lapdev-conductor.workspace = true lapdev-proxy-ssh.workspace = true lapdev-proxy-http.workspace = true lapdev-kube.workspace = true +lapdev-tunnel.workspace = true +pin-project.workspace = true diff --git a/lapdev-api/pages/tailwind.config.js 
b/crates/api/pages/tailwind.config.js similarity index 100% rename from lapdev-api/pages/tailwind.config.js rename to crates/api/pages/tailwind.config.js diff --git a/lapdev-api/src/account.rs b/crates/api/src/account.rs similarity index 88% rename from lapdev-api/src/account.rs rename to crates/api/src/account.rs index 5852340..84e0b1f 100644 --- a/lapdev-api/src/account.rs +++ b/crates/api/src/account.rs @@ -1,11 +1,12 @@ -use std::{collections::HashMap, str::FromStr}; +use std::{collections::HashMap, str::FromStr, sync::Arc}; use axum::{ - extract::{Host, Path, Query, State}, + extract::{Path, Query, State}, response::{IntoResponse, Response}, Json, }; use axum_extra::{ + extract::Host, headers::{self, Cookie}, TypedHeader, }; @@ -15,7 +16,7 @@ use lapdev_common::{ console::{MeUser, NewSessionResponse, Organization}, GitProvider, NewSshKey, SshKey, UserRole, }; -use lapdev_db::{api::DbApi, entities}; +use lapdev_db::api::DbApi; use lapdev_rpc::error::ApiError; use russh::keys::PublicKeyBase64; use sea_orm::{prelude::Uuid, ActiveModelTrait, ActiveValue}; @@ -23,7 +24,7 @@ use sea_orm::{prelude::Uuid, ActiveModelTrait, ActiveValue}; use crate::{session::create_oauth_connection, state::CoreState}; pub async fn me( - State(state): State, + State(state): State>, TypedHeader(cookies): TypedHeader, ) -> Result { let user = state.authenticate(&cookies).await?; @@ -64,7 +65,7 @@ pub async fn me( } pub async fn set_current_organization( - State(state): State, + State(state): State>, Path(org_id): Path, TypedHeader(cookies): TypedHeader, ) -> Result { @@ -74,7 +75,7 @@ pub async fn set_current_organization( .get_organization_member(user.id, org_id) .await .map_err(|_| ApiError::Unauthorized)?; - entities::user::ActiveModel { + lapdev_db_entities::user::ActiveModel { id: ActiveValue::Set(user.id), current_organization: ActiveValue::Set(org_id), ..Default::default() @@ -85,11 +86,11 @@ pub async fn set_current_organization( } async fn user_current_organization( - user: 
&entities::user::Model, + user: &lapdev_db_entities::user::Model, db: &DbApi, ) -> anyhow::Result<( - entities::organization::Model, - entities::organization_member::Model, + lapdev_db_entities::organization::Model, + lapdev_db_entities::organization_member::Model, )> { if let Ok(r) = user_organization(user, user.current_organization, db).await { return Ok(r); @@ -98,12 +99,12 @@ async fn user_current_organization( } async fn user_organization( - user: &entities::user::Model, + user: &lapdev_db_entities::user::Model, org_id: Uuid, db: &DbApi, ) -> anyhow::Result<( - entities::organization::Model, - entities::organization_member::Model, + lapdev_db_entities::organization::Model, + lapdev_db_entities::organization_member::Model, )> { let org = db.get_organization(org_id).await?; let member = db.get_organization_member(user.id, org.id).await?; @@ -111,16 +112,16 @@ async fn user_organization( } async fn pick_user_organization( - user: &entities::user::Model, + user: &lapdev_db_entities::user::Model, db: &DbApi, ) -> anyhow::Result<( - entities::organization::Model, - entities::organization_member::Model, + lapdev_db_entities::organization::Model, + lapdev_db_entities::organization_member::Model, )> { let org_members = db.get_user_organizations(user.id).await?; for org_member in org_members { if let Ok(org) = db.get_organization(org_member.organization_id).await { - entities::user::ActiveModel { + lapdev_db_entities::user::ActiveModel { id: ActiveValue::Set(user.id), current_organization: ActiveValue::Set(org.id), ..Default::default() @@ -134,12 +135,12 @@ async fn pick_user_organization( } async fn all_user_organizations( - user: &entities::user::Model, + user: &lapdev_db_entities::user::Model, db: &DbApi, ) -> anyhow::Result< Vec<( - entities::organization::Model, - entities::organization_member::Model, + lapdev_db_entities::organization::Model, + lapdev_db_entities::organization_member::Model, )>, > { let mut result = Vec::new(); @@ -154,7 +155,7 @@ async fn 
all_user_organizations( pub async fn create_ssh_key( TypedHeader(cookie): TypedHeader, - State(state): State, + State(state): State>, Json(ssh_key): Json, ) -> Result { let user = state.authenticate(&cookie).await?; @@ -179,7 +180,7 @@ pub async fn create_ssh_key( .map_err(|_| ApiError::InvalidRequest("The SSH public key is invalid".to_string()))?; let parsed_key = parsed_key.public_key_base64(); - let model = entities::ssh_public_key::ActiveModel { + let model = lapdev_db_entities::ssh_public_key::ActiveModel { id: ActiveValue::Set(Uuid::new_v4()), created_at: ActiveValue::Set(Utc::now().into()), name: ActiveValue::Set(name.to_string()), @@ -201,7 +202,7 @@ pub async fn create_ssh_key( } pub async fn all_ssh_keys( - State(state): State, + State(state): State>, TypedHeader(cookie): TypedHeader, ) -> Result { let user = state.authenticate(&cookie).await?; @@ -223,7 +224,7 @@ pub async fn all_ssh_keys( pub async fn delete_ssh_key( TypedHeader(cookie): TypedHeader, Path(key_id): Path, - State(state): State, + State(state): State>, ) -> Result { let user = state.authenticate(&cookie).await?; let key = state.db.get_ssh_key(key_id).await?; @@ -231,7 +232,7 @@ pub async fn delete_ssh_key( return Err(ApiError::Unauthorized); } - entities::ssh_public_key::ActiveModel { + lapdev_db_entities::ssh_public_key::ActiveModel { id: ActiveValue::Set(key.id), deleted_at: ActiveValue::Set(Some(Utc::now().into())), ..Default::default() @@ -244,7 +245,7 @@ pub async fn delete_ssh_key( pub async fn get_git_providers( TypedHeader(cookie): TypedHeader, - State(state): State, + State(state): State>, ) -> Result { let user = state.authenticate(&cookie).await?; let all_oauths = state.db.get_user_all_oauth(user.id).await?; @@ -261,7 +262,7 @@ pub async fn get_git_providers( avatar_url: oauth.as_ref().and_then(|o| o.avatar_url.clone()), email: oauth.as_ref().and_then(|o| o.email.clone()), name: oauth.as_ref().and_then(|o| o.name.clone()), - read_repo: oauth.as_ref().map(|o| o.read_repo), + 
read_repo: oauth.as_ref().map(|o| o.read_repo.unwrap_or(false)), scopes: config.scopes.iter().map(|s| s.to_string()).collect(), all_scopes: config .read_repo_scopes @@ -279,7 +280,7 @@ pub async fn connect_git_provider( TypedHeader(cookie): TypedHeader, Host(hostname): Host, Query(query): Query>, - State(state): State, + State(state): State>, ) -> Result { let user = state.authenticate(&cookie).await?; let provider_name = query @@ -303,7 +304,7 @@ pub async fn update_scope( TypedHeader(cookie): TypedHeader, Host(hostname): Host, Query(query): Query>, - State(state): State, + State(state): State>, ) -> Result { let user = state.authenticate(&cookie).await?; let all_oauths = state.db.get_user_all_oauth(user.id).await?; @@ -339,7 +340,7 @@ pub async fn update_scope( pub async fn disconnect_git_provider( TypedHeader(cookie): TypedHeader, Query(query): Query>, - State(state): State, + State(state): State>, ) -> Result { let user = state.authenticate(&cookie).await?; let all_oauths = state.db.get_user_all_oauth(user.id).await?; @@ -358,7 +359,7 @@ pub async fn disconnect_git_provider( .await? 
.ok_or_else(|| ApiError::InvalidRequest("provider isn't connected".to_string()))?; - entities::oauth_connection::ActiveModel { + lapdev_db_entities::oauth_connection::ActiveModel { id: ActiveValue::Set(oauth.id), deleted_at: ActiveValue::Set(Some(Utc::now().into())), ..Default::default() diff --git a/lapdev-api/src/admin.rs b/crates/api/src/admin.rs similarity index 90% rename from lapdev-api/src/admin.rs rename to crates/api/src/admin.rs index c0692b2..2b676c9 100644 --- a/lapdev-api/src/admin.rs +++ b/crates/api/src/admin.rs @@ -1,4 +1,4 @@ -use std::{collections::HashMap, str::FromStr}; +use std::{collections::HashMap, str::FromStr, sync::Arc}; use anyhow::anyhow; use axum::{ @@ -15,7 +15,6 @@ use lapdev_common::{ UpdateWorkspaceHost, WorkspaceHost, WorkspaceHostStatus, LAPDEV_BASE_HOSTNAME, }; use lapdev_conductor::scheduler::{self, LAPDEV_CPU_OVERCOMMIT}; -use lapdev_db::entities; use lapdev_rpc::error::ApiError; use sea_orm::{ ActiveModelTrait, ActiveValue, ColumnTrait, EntityTrait, PaginatorTrait, QueryFilter, @@ -31,7 +30,7 @@ use crate::{ pub async fn update_license( TypedHeader(cookie): TypedHeader, - State(state): State, + State(state): State>, Json(new_license_key): Json, ) -> Result, ApiError> { state.authenticate_cluster_admin(&cookie).await?; @@ -46,7 +45,7 @@ pub async fn update_license( pub async fn get_license( TypedHeader(cookie): TypedHeader, - State(state): State, + State(state): State>, ) -> Result, ApiError> { state.authenticate_cluster_admin(&cookie).await?; let license = state @@ -62,7 +61,7 @@ pub async fn get_license( } pub async fn new_license( - State(state): State, + State(state): State>, Json(new_license): Json, ) -> Result { let license = state @@ -81,7 +80,7 @@ pub async fn new_license( pub async fn get_workspace_hosts( TypedHeader(cookie): TypedHeader, - State(state): State, + State(state): State>, ) -> Result>, ApiError> { state.authenticate_cluster_admin(&cookie).await?; let hosts = state.db.get_all_workspace_hosts().await?; @@ 
-110,7 +109,7 @@ pub async fn get_workspace_hosts( pub async fn create_workspace_host( TypedHeader(cookie): TypedHeader, - State(state): State, + State(state): State>, Json(new_workspace_host): Json, ) -> Result, ApiError> { state.authenticate_cluster_admin(&cookie).await?; @@ -125,7 +124,7 @@ pub async fn create_workspace_host( new_workspace_host.host ))); } - let host = entities::workspace_host::ActiveModel { + let host = lapdev_db_entities::workspace_host::ActiveModel { id: ActiveValue::Set(Uuid::new_v4()), deleted_at: ActiveValue::Set(None), host: ActiveValue::Set(new_workspace_host.host), @@ -164,11 +163,11 @@ pub async fn create_workspace_host( pub async fn update_workspace_host( TypedHeader(cookie): TypedHeader, Path(id): Path, - State(state): State, + State(state): State>, Json(update_workspace_host): Json, ) -> Result, ApiError> { state.authenticate_cluster_admin(&cookie).await?; - let host = entities::workspace_host::ActiveModel { + let host = lapdev_db_entities::workspace_host::ActiveModel { id: ActiveValue::Set(id), cpu: ActiveValue::Set(update_workspace_host.cpu as i32), memory: ActiveValue::Set(update_workspace_host.memory as i32), @@ -204,13 +203,13 @@ pub async fn update_workspace_host( pub async fn delete_workspace_host( TypedHeader(cookie): TypedHeader, Path(id): Path, - State(state): State, + State(state): State>, ) -> Result { state.authenticate_cluster_admin(&cookie).await?; let now = Utc::now(); - if entities::workspace::Entity::find() - .filter(entities::workspace::Column::DeletedAt.is_null()) - .filter(entities::workspace::Column::HostId.eq(id)) + if lapdev_db_entities::workspace::Entity::find() + .filter(lapdev_db_entities::workspace::Column::DeletedAt.is_null()) + .filter(lapdev_db_entities::workspace::Column::HostId.eq(id)) .one(&state.db.conn) .await? 
.is_some() @@ -219,7 +218,7 @@ pub async fn delete_workspace_host( "You can't delete a workspace host with workspaces on it".to_string(), )); }; - entities::workspace_host::ActiveModel { + lapdev_db_entities::workspace_host::ActiveModel { id: ActiveValue::Set(id), deleted_at: ActiveValue::Set(Some(now.into())), ..Default::default() @@ -231,7 +230,7 @@ pub async fn delete_workspace_host( pub async fn get_oauth( TypedHeader(cookie): TypedHeader, - State(state): State, + State(state): State>, ) -> Result, ApiError> { state.authenticate_cluster_admin(&cookie).await?; let setting = OauthSettings { @@ -261,7 +260,7 @@ pub async fn get_oauth( pub async fn update_oauth( TypedHeader(cookie): TypedHeader, - State(state): State, + State(state): State>, Json(update_oauth): Json, ) -> Result { let is_cluster_initiated = { @@ -302,7 +301,7 @@ pub async fn update_oauth( pub async fn get_certs( TypedHeader(cookie): TypedHeader, - State(state): State, + State(state): State>, ) -> Result>, ApiError> { state.authenticate_cluster_admin(&cookie).await?; let certs = state.db.get_config(LAPDEV_CERTS).await?; @@ -312,7 +311,7 @@ pub async fn get_certs( pub async fn update_certs( TypedHeader(cookie): TypedHeader, - State(state): State, + State(state): State>, Json(update_body): Json>, ) -> Result { state.authenticate_cluster_admin(&cookie).await?; @@ -322,7 +321,7 @@ pub async fn update_certs( } pub async fn get_auth_providers( - State(state): State, + State(state): State>, ) -> Result>, ApiError> { let providers: Vec = state .auth @@ -336,7 +335,7 @@ pub async fn get_auth_providers( } pub async fn get_cluster_info( - State(state): State, + State(state): State>, ) -> Result, ApiError> { let providers: Vec = state .auth @@ -378,15 +377,15 @@ pub struct FilterQuery { pub async fn get_cluster_users( TypedHeader(cookie): TypedHeader, - State(state): State, + State(state): State>, Query(filter_query): Query, ) -> Result, ApiError> { state.authenticate_cluster_admin(&cookie).await?; - let mut 
query = entities::user::Entity::find() - .filter(entities::user::Column::DeletedAt.is_null()) - .order_by_desc(entities::user::Column::CreatedAt); + let mut query = lapdev_db_entities::user::Entity::find() + .filter(lapdev_db_entities::user::Column::DeletedAt.is_null()) + .order_by_desc(lapdev_db_entities::user::Column::CreatedAt); if let Some(filter) = filter_query.filter { - query = query.filter(entities::user::Column::Name.like(format!("%{filter}%"))); + query = query.filter(lapdev_db_entities::user::Column::Name.like(format!("%{filter}%"))); } let page_size = filter_query.page_size.unwrap_or(10); let page = filter_query.page.unwrap_or(0); @@ -419,7 +418,7 @@ pub async fn get_cluster_users( pub async fn update_cluster_user( TypedHeader(cookie): TypedHeader, Path(user_id): Path, - State(state): State, + State(state): State>, Json(update_cluter_user): Json, ) -> Result { state.authenticate_cluster_admin(&cookie).await?; @@ -430,10 +429,10 @@ pub async fn update_cluster_user( .ok_or_else(|| ApiError::InvalidRequest("user doesn't exist".to_string()))?; if !update_cluter_user.cluster_admin { // check if it's the last cluster admin if we're trying to change it to false - if entities::user::Entity::find() - .filter(entities::user::Column::DeletedAt.is_null()) - .filter(entities::user::Column::ClusterAdmin.eq(true)) - .filter(entities::user::Column::Id.ne(user.id)) + if lapdev_db_entities::user::Entity::find() + .filter(lapdev_db_entities::user::Column::DeletedAt.is_null()) + .filter(lapdev_db_entities::user::Column::ClusterAdmin.eq(true)) + .filter(lapdev_db_entities::user::Column::Id.ne(user.id)) .one(&state.db.conn) .await? 
.is_none() @@ -443,7 +442,7 @@ pub async fn update_cluster_user( )); } } - entities::user::ActiveModel { + lapdev_db_entities::user::ActiveModel { id: ActiveValue::Set(user.id), cluster_admin: ActiveValue::Set(update_cluter_user.cluster_admin), ..Default::default() @@ -455,7 +454,7 @@ pub async fn update_cluster_user( pub async fn get_cpu_overcommit( TypedHeader(cookie): TypedHeader, - State(state): State, + State(state): State>, ) -> Result { state.require_enterprise().await?; state.authenticate_cluster_admin(&cookie).await?; @@ -466,7 +465,7 @@ pub async fn get_cpu_overcommit( pub async fn update_cpu_overcommit( TypedHeader(cookie): TypedHeader, Path(value): Path, - State(state): State, + State(state): State>, ) -> Result { state.require_enterprise().await?; state.authenticate_cluster_admin(&cookie).await?; @@ -484,7 +483,7 @@ pub async fn update_cpu_overcommit( } pub async fn get_hostnames( - State(state): State, + State(state): State>, ) -> Result>, ApiError> { let mut hostnames = state .conductor @@ -502,7 +501,7 @@ pub async fn get_hostnames( pub async fn update_hostnames( TypedHeader(cookie): TypedHeader, - State(state): State, + State(state): State>, Json(update): Json>, ) -> Result { state.authenticate_cluster_admin(&cookie).await?; diff --git a/crates/api/src/app_catalog_events.rs b/crates/api/src/app_catalog_events.rs new file mode 100644 index 0000000..b1e85f6 --- /dev/null +++ b/crates/api/src/app_catalog_events.rs @@ -0,0 +1,126 @@ +use std::{convert::Infallible, sync::Arc, time::Duration}; + +use axum::{ + extract::{Path, State}, + response::sse::{Event, KeepAlive, Sse}, +}; +use axum_extra::{headers, TypedHeader}; +use chrono::Utc; +use futures::{stream, Stream, StreamExt}; +use lapdev_common::kube::AppCatalogStatusEvent; +use lapdev_rpc::error::ApiError; +use tokio_stream::wrappers::BroadcastStream; +use tracing::warn; +use uuid::Uuid; + +use crate::state::CoreState; + +pub async fn stream_app_catalog_events( + Path((org_id, catalog_id)): 
Path<(Uuid, Uuid)>, + State(state): State>, + TypedHeader(cookies): TypedHeader, +) -> Result>>, ApiError> { + let user = state.authenticate(&cookies).await?; + state + .db + .get_organization_member(user.id, org_id) + .await + .map_err(|_| ApiError::Unauthorized)?; + + let catalog = state + .db + .get_app_catalog(catalog_id) + .await? + .ok_or_else(|| ApiError::InvalidRequest("App catalog not found".to_string()))?; + + if catalog.organization_id != org_id { + return Err(ApiError::Unauthorized); + } + + let initial_event = AppCatalogStatusEvent { + organization_id: catalog.organization_id, + catalog_id, + cluster_id: catalog.cluster_id, + sync_version: catalog.sync_version, + last_synced_at: catalog.last_synced_at.map(|dt| dt.to_string()), + last_sync_actor_id: catalog.last_sync_actor_id, + updated_at: catalog + .last_synced_at + .map(|dt| dt.with_timezone(&Utc)) + .unwrap_or_else(Utc::now), + }; + + let receiver = state.app_catalog_events.subscribe(); + let initial_stream = stream::iter( + build_app_catalog_sse_event(&initial_event) + .into_iter() + .map(Ok::), + ); + + let target_org = org_id; + let target_catalog = catalog_id; + + let event_stream = BroadcastStream::new(receiver).filter_map(move |result| async move { + match result { + Ok(event) + if event.organization_id == target_org && event.catalog_id == target_catalog => + { + build_app_catalog_sse_event(&event).map(Ok) + } + Ok(_) => None, + Err(err) => { + warn!("app catalog event stream lagged: {err}"); + None + } + } + }); + + let stream = initial_stream.chain(event_stream); + let keep_alive = KeepAlive::new() + .interval(Duration::from_secs(15)) + .text("keep-alive"); + + Ok(Sse::new(stream).keep_alive(keep_alive)) +} + +pub async fn stream_organization_app_catalog_events( + Path(org_id): Path, + State(state): State>, + TypedHeader(cookies): TypedHeader, +) -> Result>>, ApiError> { + let user = state.authenticate(&cookies).await?; + state + .db + .get_organization_member(user.id, org_id) + .await + 
.map_err(|_| ApiError::Unauthorized)?; + + let receiver = state.app_catalog_events.subscribe(); + let target_org = org_id; + + let event_stream = BroadcastStream::new(receiver).filter_map(move |result| { + let target_org = target_org; + async move { + match result { + Ok(event) if event.organization_id == target_org => { + build_app_catalog_sse_event(&event).map(Ok) + } + Ok(_) => None, + Err(err) => { + warn!("app catalog event stream lagged: {err}"); + None + } + } + } + }); + + let keep_alive = KeepAlive::new() + .interval(Duration::from_secs(15)) + .text("keep-alive"); + + Ok(Sse::new(event_stream).keep_alive(keep_alive)) +} + +fn build_app_catalog_sse_event(event: &AppCatalogStatusEvent) -> Option { + Event::default().event("catalog").json_data(event).ok() +} diff --git a/lapdev-api/src/auth.rs b/crates/api/src/auth.rs similarity index 83% rename from lapdev-api/src/auth.rs rename to crates/api/src/auth.rs index bf887e0..b0e6bb3 100644 --- a/lapdev-api/src/auth.rs +++ b/crates/api/src/auth.rs @@ -5,8 +5,9 @@ use lapdev_common::AuthProvider; use lapdev_db::api::DbApi; use oauth2::{ basic::BasicClient, AccessToken, AuthUrl, AuthorizationCode, ClientId, ClientSecret, - RedirectUrl, TokenResponse, TokenUrl, + EndpointNotSet, EndpointSet, RedirectUrl, TokenResponse, TokenUrl, }; +use reqwest::redirect::Policy; use tokio::sync::RwLock; use crate::gitlab::GitlabClient; @@ -42,8 +43,11 @@ impl AuthConfig { }; } +type ConfiguredOAuthClient = + BasicClient; + pub struct Auth { - pub clients: RwLock>, + pub clients: RwLock>, pub gitlab_client: GitlabClient, } @@ -60,7 +64,7 @@ impl Auth { *self.clients.write().await = clients; } - async fn get_clients(db: &DbApi) -> HashMap { + async fn get_clients(db: &DbApi) -> HashMap { let mut clients = HashMap::new(); if let Ok(client) = Self::build_auth(db, &AuthConfig::GITHUB).await { clients.insert(AuthProvider::Github, (client, AuthConfig::GITHUB)); @@ -71,19 +75,17 @@ impl Auth { clients } - async fn build_auth(db: &DbApi, 
config: &AuthConfig) -> Result { + async fn build_auth(db: &DbApi, config: &AuthConfig) -> Result { let client_id = db.get_config(config.client_id).await?; let client_secret = db.get_config(config.client_secret).await?; if client_id.trim().is_empty() || client_secret.trim().is_empty() { return Err(anyhow::anyhow!("client id or secret is empty")); } - let client = BasicClient::new( - ClientId::new(client_id), - Some(ClientSecret::new(client_secret)), - AuthUrl::new(config.auth_url.to_string())?, - Some(TokenUrl::new(config.token_url.to_string())?), - ); + let client = BasicClient::new(ClientId::new(client_id)) + .set_client_secret(ClientSecret::new(client_secret)) + .set_auth_uri(AuthUrl::new(config.auth_url.to_string())?) + .set_token_uri(TokenUrl::new(config.token_url.to_string())?); Ok(client) } @@ -121,10 +123,13 @@ impl Auth { let (client, _) = clients .get(provider) .ok_or_else(|| anyhow::anyhow!("can't find provider"))?; + let http_client = reqwest::Client::builder() + .redirect(Policy::none()) + .build()?; let token = match client .exchange_code(AuthorizationCode::new(code)) .set_redirect_uri(Cow::Borrowed(&RedirectUrl::new(redirect_url)?)) - .request_async(oauth2::reqwest::async_http_client) + .request_async(&http_client) .await { Ok(t) => t, diff --git a/lapdev-api/src/cert.rs b/crates/api/src/cert.rs similarity index 99% rename from lapdev-api/src/cert.rs rename to crates/api/src/cert.rs index d227356..104fdc5 100644 --- a/lapdev-api/src/cert.rs +++ b/crates/api/src/cert.rs @@ -15,6 +15,7 @@ use webpki::EndEntityCert; pub type CertStore = Arc>>>>; +#[allow(dead_code)] pub fn tls_config(certs: CertStore) -> Result { let mut config = ServerConfig::builder() .with_no_client_auth() diff --git a/crates/api/src/cli_auth.rs b/crates/api/src/cli_auth.rs new file mode 100644 index 0000000..9ce96c4 --- /dev/null +++ b/crates/api/src/cli_auth.rs @@ -0,0 +1,97 @@ +use std::sync::Arc; + +use axum::{ + extract::{Query, State}, + http::StatusCode, + 
response::{IntoResponse, Response}, + Json, +}; +use axum_extra::{headers, TypedHeader}; +use chrono::Utc; +use lapdev_rpc::error::ApiError; +use pasetors::claims::Claims; +use serde::{Deserialize, Serialize}; +use uuid::Uuid; + +use crate::state::CoreState; + +#[derive(Debug, Deserialize)] +pub struct GenerateTokenRequest { + pub session_id: Uuid, + pub device_name: String, +} + +#[derive(Debug, Deserialize)] +pub struct CliTokenQuery { + pub session_id: Uuid, +} + +#[derive(Debug, Serialize)] +pub struct CliTokenResponse { + pub token: String, + pub expires_in: u32, // seconds (30 days = 2592000) +} + +/// POST /api/v1/auth/cli/generate-token - Generate CLI token for already logged-in users +/// This is called by the Leptos frontend when user is already authenticated +pub async fn generate_cli_token( + TypedHeader(cookie): TypedHeader, + State(state): State>, + Json(req): Json, +) -> Result, ApiError> { + // Authenticate the browser session + let user = state.authenticate(&cookie).await?; + + // Create PASETO token for CLI + let mut cli_claims = Claims::new_expires_in(&core::time::Duration::from_secs(86400 * 30))?; + cli_claims.add_additional("user_id", user.id.to_string())?; + cli_claims.add_additional("organization_id", user.current_organization.to_string())?; + cli_claims.add_additional("session_type", "cli")?; + cli_claims.add_additional("device_name", req.device_name.clone())?; + cli_claims.add_additional("session_id", req.session_id.to_string())?; + let cli_token = pasetors::local::encrypt(&state.auth_token_key, &cli_claims, None, None)?; + + // Store in pending_cli_auth map (5-min TTL) + state.pending_cli_auth.write().await.insert( + req.session_id, + crate::state::PendingCliAuth { + token: cli_token, + expires_at: Utc::now() + chrono::Duration::seconds(300), + }, + ); + + Ok(Json(CliTokenResponse { + token: "generated".to_string(), // Don't return actual token, CLI will poll for it + expires_in: 2592000, + })) +} + +/// GET /api/v1/auth/cli/token - Poll 
for CLI auth token +/// Returns 404 if token not ready, or the token if authentication completed +pub async fn cli_token_poll( + Query(params): Query, + State(state): State>, +) -> Result { + let pending = state.pending_cli_auth.read().await; + + if let Some(auth) = pending.get(¶ms.session_id) { + if auth.expires_at > Utc::now() { + // Remove from pending (single use) + let token = auth.token.clone(); + drop(pending); + state + .pending_cli_auth + .write() + .await + .remove(¶ms.session_id); + + return Ok(Json(CliTokenResponse { + token, + expires_in: 2592000, // 30 days + }) + .into_response()); + } + } + + Ok(StatusCode::NOT_FOUND.into_response()) +} diff --git a/crates/api/src/cluster_events.rs b/crates/api/src/cluster_events.rs new file mode 100644 index 0000000..81a0321 --- /dev/null +++ b/crates/api/src/cluster_events.rs @@ -0,0 +1,130 @@ +use std::{convert::Infallible, str::FromStr, sync::Arc, time::Duration}; + +use axum::{ + extract::{Path, State}, + response::sse::{Event, KeepAlive, Sse}, +}; +use axum_extra::{headers, TypedHeader}; +use chrono::Utc; +use futures::{stream, Stream, StreamExt}; +use lapdev_common::kube::{ClusterStatusEvent, KubeClusterStatus}; +use lapdev_rpc::error::ApiError; +use tokio_stream::wrappers::BroadcastStream; +use tracing::warn; +use uuid::Uuid; + +use crate::state::CoreState; + +pub async fn stream_cluster_status_events( + Path((org_id, cluster_id)): Path<(Uuid, Uuid)>, + State(state): State>, + TypedHeader(cookies): TypedHeader, +) -> Result>>, ApiError> { + let user = state.authenticate(&cookies).await?; + state + .db + .get_organization_member(user.id, org_id) + .await + .map_err(|_| ApiError::Unauthorized)?; + + let cluster = state + .db + .get_kube_cluster(cluster_id) + .await? 
+ .ok_or_else(|| ApiError::InvalidRequest("Cluster not found".to_string()))?; + + if cluster.organization_id != org_id { + return Err(ApiError::Unauthorized); + } + + let status = + KubeClusterStatus::from_str(&cluster.status).unwrap_or(KubeClusterStatus::Provisioning); + let updated_at = cluster + .last_reported_at + .map(|dt| dt.with_timezone(&Utc)) + .unwrap_or_else(Utc::now); + + let initial_event = ClusterStatusEvent { + organization_id: cluster.organization_id, + cluster_id, + status, + cluster_version: cluster.cluster_version.clone(), + region: cluster.region.clone(), + updated_at, + }; + + let receiver = state.cluster_events.subscribe(); + + let initial_stream = stream::iter( + build_cluster_sse_event(&initial_event) + .into_iter() + .map(Ok::), + ); + + let target_org = org_id; + let target_cluster = cluster_id; + + let event_stream = BroadcastStream::new(receiver).filter_map(move |result| async move { + match result { + Ok(event) + if event.organization_id == target_org && event.cluster_id == target_cluster => + { + build_cluster_sse_event(&event).map(Ok) + } + Ok(_) => None, + Err(err) => { + warn!("cluster event stream lagged: {err}"); + None + } + } + }); + + let stream = initial_stream.chain(event_stream); + let keep_alive = KeepAlive::new() + .interval(Duration::from_secs(15)) + .text("keep-alive"); + + Ok(Sse::new(stream).keep_alive(keep_alive)) +} + +pub async fn stream_organization_cluster_status_events( + Path(org_id): Path, + State(state): State>, + TypedHeader(cookies): TypedHeader, +) -> Result>>, ApiError> { + let user = state.authenticate(&cookies).await?; + state + .db + .get_organization_member(user.id, org_id) + .await + .map_err(|_| ApiError::Unauthorized)?; + + let receiver = state.cluster_events.subscribe(); + let target_org = org_id; + + let event_stream = BroadcastStream::new(receiver).filter_map(move |result| { + let target_org = target_org; + async move { + match result { + Ok(event) if event.organization_id == target_org => { + 
build_cluster_sse_event(&event).map(Ok) + } + Ok(_) => None, + Err(err) => { + warn!("cluster event stream lagged: {err}"); + None + } + } + } + }); + + let keep_alive = KeepAlive::new() + .interval(Duration::from_secs(15)) + .text("keep-alive"); + + Ok(Sse::new(event_stream).keep_alive(keep_alive)) +} + +fn build_cluster_sse_event(event: &ClusterStatusEvent) -> Option { + Event::default().event("cluster").json_data(event).ok() +} diff --git a/crates/api/src/devbox.rs b/crates/api/src/devbox.rs new file mode 100644 index 0000000..15b6958 --- /dev/null +++ b/crates/api/src/devbox.rs @@ -0,0 +1,1093 @@ +use std::{collections::BTreeMap, net::SocketAddr, sync::Arc}; + +use axum::{ + extract::{ws::WebSocket, Path, Query, State, WebSocketUpgrade}, + http::HeaderMap, + response::Response, + Json, +}; +use axum_extra::{headers, TypedHeader}; +use futures::StreamExt; +use lapdev_common::devbox::DirectTunnelConfig; +use lapdev_devbox_rpc::{ + DevboxClientRpcClient, DevboxInterceptRpc, DevboxSessionInfo, DevboxSessionRpc, PortMapping, +}; +use lapdev_rpc::{error::ApiError, spawn_twoway}; +use lapdev_tunnel::{ + relay_client_addr, relay_server_addr, run_tunnel_server_with_connector, + websocket_serde_transport_from_socket, DynTunnelStream, RelayEndpoint, TunnelError, + TunnelTarget, WebSocketUdpSocket, +}; +use serde::{Deserialize, Serialize}; +use tarpc::server::{BaseChannel, Channel}; +use uuid::Uuid; + +use crate::{ + devbox_tunnels::split_axum_websocket, state::CoreState, websocket_socket::AxumBinarySocket, +}; + +#[derive(Debug, Serialize)] +pub struct WhoamiResponse { + pub user_id: Uuid, + pub email: String, + pub name: Option, + pub organization_id: Uuid, + pub device_name: String, + pub authenticated_at: Option, + pub expires_at: Option, +} + +/// GET /api/v1/devbox/whoami - Get current CLI session info +pub async fn devbox_whoami( + State(state): State>, + TypedHeader(auth): TypedHeader>, +) -> Result, ApiError> { + let ctx = state.authenticate_bearer(&auth).await?; 
+ + // Extract timestamps from token claims if available + let authenticated_at = ctx + .token_claims + .get_claim("iat") + .and_then(|v| serde_json::from_value::(v.clone()).ok()); + + let expires_at = ctx + .token_claims + .get_claim("exp") + .and_then(|v| serde_json::from_value::(v.clone()).ok()); + + Ok(Json(WhoamiResponse { + user_id: ctx.user.id, + email: ctx.user.email.unwrap_or_default(), + name: ctx.user.name, + organization_id: ctx.organization_id, + device_name: ctx.device_name, + authenticated_at, + expires_at, + })) +} + +/// WebSocket endpoint for devbox RPC connections (control plane) +pub async fn devbox_rpc_websocket( + websocket: WebSocketUpgrade, + headers: HeaderMap, + State(state): State>, +) -> Result { + tracing::debug!("Handling devbox RPC WebSocket connection"); + + // Extract Bearer token from Authorization header + let auth_header = headers + .get("Authorization") + .ok_or(ApiError::Unauthenticated)? + .to_str() + .map_err(|_| ApiError::Unauthenticated)?; + + if !auth_header.starts_with("Bearer ") { + return Err(ApiError::Unauthenticated); + } + + let token = &auth_header[7..]; // Skip "Bearer " + + // Authenticate using the same method as regular devbox requests + let bearer = headers::Authorization::bearer(token).map_err(|_| ApiError::Unauthenticated)?; + let ctx = state.authenticate_bearer(&bearer).await?; + + tracing::info!( + "Devbox RPC WebSocket authenticated for user {} ({})", + ctx.user.id, + ctx.device_name + ); + + // Extract expiration time from token claims + let expires_at = ctx + .token_claims + .get_claim("exp") + .and_then(|v| serde_json::from_value::(v.clone()).ok()) + .and_then(|s| chrono::DateTime::parse_from_rfc3339(&s).ok()) + .map(|dt| dt.with_timezone(&chrono::Utc)) + .unwrap_or_else(|| chrono::Utc::now() + chrono::Duration::days(30)); + + // Check for existing active sessions and notify them before displacing + if let Some(existing_handles) = state + .active_devbox_sessions + .read() + .await + .get(&ctx.user.id) + 
.map(|entries| entries.values().cloned().collect::>()) + { + for old_handle in existing_handles { + tracing::info!( + "Displacing existing session {} on device {} for user {}", + old_handle.session_id, + old_handle.device_name, + ctx.user.id + ); + + let _ = old_handle + .notify_tx + .send(crate::state::DevboxSessionNotification::Displaced { + new_device_name: ctx.device_name.clone(), + }); + } + } + + // Create or update devbox session + state + .db + .create_or_update_devbox_session( + ctx.user.id, + token, + ctx.device_name.clone(), + expires_at.into(), + ) + .await + .map_err(|e| ApiError::InternalError(format!("Failed to create session: {}", e)))?; + + tracing::info!( + "Created devbox session for user {} on device {}", + ctx.user.id, + ctx.device_name + ); + + Ok(handle_devbox_rpc_upgrade( + websocket, + state, + ctx.session_id, + ctx.user.id, + ctx.organization_id, + ctx.device_name, + )) +} + +fn handle_devbox_rpc_upgrade( + websocket: WebSocketUpgrade, + state: Arc, + session_id: Uuid, + user_id: Uuid, + organization_id: Uuid, + device_name: String, +) -> Response { + websocket + .on_failed_upgrade(|e| tracing::error!("devbox RPC websocket upgrade failed {e:?}")) + .on_upgrade(move |socket| async move { + handle_devbox_rpc( + socket, + state, + session_id, + user_id, + organization_id, + device_name, + ) + .await; + }) +} + +async fn handle_devbox_rpc( + socket: WebSocket, + state: Arc, + session_id: Uuid, + user_id: Uuid, + organization_id: Uuid, + device_name: String, +) { + let socket = AxumBinarySocket::new(socket); + let transport = websocket_serde_transport_from_socket( + socket, + tarpc::tokio_serde::formats::Bincode::default(), + ); + let (server_chan, client_chan, _abort_handle) = spawn_twoway(transport); + + // Create RPC client (for calling CLI methods) + let rpc_client = + DevboxClientRpcClient::new(tarpc::client::Config::default(), client_chan).spawn(); + + // Create notification channel for session migration + let (notify_tx, mut notify_rx) = 
tokio::sync::mpsc::unbounded_channel(); + + // Register this session as active + let connection_id = Uuid::new_v4(); + { + let mut sessions = state.active_devbox_sessions.write().await; + sessions + .entry(user_id) + .or_insert_with(BTreeMap::new) + .insert( + connection_id, + crate::state::DevboxSessionHandle { + session_id, + device_name: device_name.clone(), + notify_tx, + rpc_client: rpc_client.clone(), + connection_id, + }, + ); + } + + // Create RPC server (for CLI to call our methods) + let rpc_server = DevboxSessionRpcServer::new( + session_id, + user_id, + organization_id, + device_name.clone(), + rpc_client.clone(), + state.clone(), + ); + + tracing::info!( + "Starting DevboxSessionRpc server for session {} user {} device {}", + session_id, + user_id, + device_name + ); + + // Run RPC server and notification listener concurrently + let rpc_client_for_notifications = rpc_client.clone(); + let notification_task = tokio::spawn(async move { + while let Some(notification) = notify_rx.recv().await { + match notification { + crate::state::DevboxSessionNotification::Displaced { new_device_name } => { + tracing::info!( + "Session {} displaced by new login from {}", + session_id, + new_device_name + ); + // Notify the CLI that it's been displaced + let _ = rpc_client_for_notifications + .session_displaced(tarpc::context::current(), new_device_name) + .await; + } + } + } + }); + + // Run the RPC server + BaseChannel::with_defaults(server_chan) + .execute(rpc_server.serve()) + .for_each(|resp| async move { + tokio::spawn(resp); + }) + .await; + + // Cleanup: unregister session and stop notification listener + { + let mut sessions = state.active_devbox_sessions.write().await; + if let Some(entries) = sessions.get_mut(&user_id) { + entries.remove(&connection_id); + if entries.is_empty() { + sessions.remove(&user_id); + } + } + } + notification_task.abort(); + + if let Ok(Some(session)) = state.db.get_active_devbox_session(user_id).await { + if let Some(environment_id) 
= session.active_environment_id { + let state_clone = state.clone(); + tokio::spawn(async move { + state_clone + .clear_devbox_routes_for_environment(environment_id) + .await; + }); + } + } + + // Mark session as revoked in database + if let Err(e) = state.db.revoke_active_devbox_session(user_id).await { + tracing::error!( + "Failed to revoke session {} on disconnect: {}", + session_id, + e + ); + } + + tracing::info!( + "DevboxSessionRpc server stopped for session {} user {} device {} (session revoked)", + session_id, + user_id, + device_name + ); +} + +#[derive(Default, Deserialize)] +pub struct DevboxInterceptTunnelParams { + _workload_id: Option, +} + +/// WebSocket endpoint for devbox intercept tunnels (server side - receives connections from in-cluster services) +pub async fn devbox_intercept_tunnel_websocket( + Path(requested_user_id): Path, + Query(_params): Query, + websocket: WebSocketUpgrade, + headers: HeaderMap, + State(state): State>, +) -> Result { + tracing::debug!("Handling devbox tunnel WebSocket connection"); + + let auth_header = headers + .get("Authorization") + .ok_or(ApiError::Unauthenticated)? + .to_str() + .map_err(|_| ApiError::Unauthenticated)?; + + if !auth_header.starts_with("Bearer ") { + return Err(ApiError::Unauthenticated); + } + + let token = &auth_header[7..]; + + let session = state + .db + .get_devbox_session_by_token_hash(token) + .await + .map_err(|e| ApiError::InternalError(format!("Failed to load devbox session: {}", e)))? 
+ .ok_or(ApiError::Unauthenticated)?; + + if session.user_id != requested_user_id { + tracing::warn!( + "Devbox user mismatch: token user {} vs path {}", + session.user_id, + requested_user_id + ); + return Err(ApiError::Unauthenticated); + } + + state + .db + .update_devbox_session_last_used(session.user_id) + .await + .map_err(|e| ApiError::InternalError(format!("Failed to update session usage: {}", e)))?; + + tracing::info!( + "Devbox tunnel WebSocket authenticated for user {} ({})", + session.user_id, + session.device_name + ); + + let user_id = session.user_id; + let registry = state.devbox_tunnels.clone(); + + let token_string = token.to_string(); + + Ok(websocket.on_upgrade(move |socket| { + let registry = registry.clone(); + async move { + registry.register_cli(user_id, token_string, socket).await; + } + })) +} + +/// WebSocket endpoint for devbox client tunnels (client side - connects to in-cluster services via devbox-proxy) +pub async fn devbox_client_tunnel_websocket( + Path(requested_user_id): Path, + websocket: WebSocketUpgrade, + headers: HeaderMap, + State(state): State>, +) -> Result { + tracing::debug!("Handling devbox client tunnel WebSocket connection"); + + let auth_header = headers + .get("Authorization") + .ok_or(ApiError::Unauthenticated)? + .to_str() + .map_err(|_| ApiError::Unauthenticated)?; + + if !auth_header.starts_with("Bearer ") { + return Err(ApiError::Unauthenticated); + } + + let token = &auth_header[7..]; + + let session = state + .db + .get_devbox_session_by_token_hash(token) + .await + .map_err(|e| ApiError::InternalError(format!("Failed to load devbox session: {}", e)))? 
+ .ok_or(ApiError::Unauthenticated)?; + + if session.user_id != requested_user_id { + tracing::warn!( + "Devbox user mismatch: token user {} vs path {}", + session.user_id, + requested_user_id + ); + return Err(ApiError::Unauthenticated); + } + + state + .db + .update_devbox_session_last_used(session.user_id) + .await + .map_err(|e| ApiError::InternalError(format!("Failed to update session usage: {}", e)))?; + + tracing::info!( + "Devbox client tunnel WebSocket authenticated for user {} ({})", + session.user_id, + session.device_name + ); + + let user_id = session.user_id; + + let environment_id = session.active_environment_id; + + let environment = match environment_id { + Some(environment_id) => state.db.get_kube_environment(environment_id).await?, + None => None, + }; + + let cluster_id = environment.map(|e| e.cluster_id); + tracing::info!("Devbox client tunnel for user {user_id} targeting environment {environment_id:?} cluster {cluster_id:?}"); + + let tunnel_registry = state.kube_controller.tunnel_registry.clone(); + + Ok(websocket.on_upgrade(move |socket| { + let registry = tunnel_registry.clone(); + let cluster_id = cluster_id; + let user_id = user_id; + async move { + let (sink, stream) = split_axum_websocket(socket); + let udp_socket = WebSocketUdpSocket::from_parts( + sink, + stream, + relay_server_addr(), + relay_client_addr(), + ); + + let connector = move |target: TunnelTarget| { + let registry = registry.clone(); + async move { + let Some(cluster_id) = cluster_id else { + return Err(TunnelError::Remote("active environment is not set".into())); + }; + + match registry.get_client(cluster_id).await { + Some(cluster_client) if !cluster_client.is_closed() => { + let TunnelTarget { host, port } = target; + let stream = cluster_client.connect_tcp(host, port).await?; + Ok::(Box::new(stream) as DynTunnelStream) + } + Some(_) => { + tracing::warn!( + user_id = %user_id, + cluster_id = %cluster_id, + "Cluster tunnel is closed; rejecting devbox client request" + 
); + Err(TunnelError::Remote("cluster tunnel is closed".into())) + } + None => { + tracing::warn!( + user_id = %user_id, + cluster_id = %cluster_id, + "No cluster tunnel available for devbox client request" + ); + Err(TunnelError::Remote("cluster tunnel unavailable".into())) + } + } + } + }; + + match RelayEndpoint::udp_server_connection(udp_socket).await { + Ok(connection) => { + if let Err(err) = run_tunnel_server_with_connector(connection, connector).await + { + tracing::warn!( + user_id = %user_id, + error = %err, + "Devbox client tunnel terminated with error" + ); + } + } + Err(err) => { + tracing::warn!( + user_id = %user_id, + error = %err, + "Failed to establish QUIC relay for devbox client tunnel" + ); + } + } + } + })) +} + +#[derive(Clone)] +struct DevboxSessionRpcServer { + session_id: Uuid, + user_id: Uuid, + #[allow(dead_code)] + organization_id: Uuid, + #[allow(dead_code)] + device_name: String, + #[allow(dead_code)] + client_rpc: DevboxClientRpcClient, + state: Arc, +} + +impl DevboxSessionRpcServer { + fn new( + session_id: Uuid, + user_id: Uuid, + organization_id: Uuid, + device_name: String, + client_rpc: DevboxClientRpcClient, + state: Arc, + ) -> Self { + Self { + session_id, + user_id, + organization_id, + device_name, + client_rpc, + state, + } + } +} + +impl DevboxSessionRpc for DevboxSessionRpcServer { + async fn whoami(self, _context: tarpc::context::Context) -> Result { + tracing::info!("whoami called for session {}", self.session_id); + + let user = self + .state + .db + .get_user(self.user_id) + .await + .map_err(|e| format!("Failed to fetch user: {}", e))? + .ok_or_else(|| "User not found".to_string())?; + + let session = self + .state + .db + .get_active_devbox_session(self.user_id) + .await + .map_err(|e| format!("Failed to fetch session: {}", e))? 
+ .ok_or_else(|| "Session not found".to_string())?; + + if let Some(environment_id) = session.active_environment_id { + let state = self.state.clone(); + let user_id = self.user_id; + tokio::spawn(async move { + state.push_devbox_routes(user_id, environment_id).await; + }); + } + + Ok(DevboxSessionInfo { + user_id: self.user_id, + email: user.email.unwrap_or_default(), + device_name: session.device_name, + created_at: session.created_at.with_timezone(&chrono::Utc), + expires_at: session.expires_at.with_timezone(&chrono::Utc), + last_used_at: session.last_used_at.with_timezone(&chrono::Utc), + }) + } + + async fn get_active_environment( + self, + _context: tarpc::context::Context, + ) -> Result, String> { + tracing::info!( + "get_active_environment called for session {}", + self.session_id + ); + + let session = self + .state + .db + .get_active_devbox_session(self.user_id) + .await + .map_err(|e| format!("Failed to fetch session: {}", e))? + .ok_or_else(|| "Session not found".to_string())?; + + if let Some(env_id) = session.active_environment_id { + let environment = self + .state + .db + .get_kube_environment(env_id) + .await + .map_err(|e| format!("Failed to fetch environment: {}", e))? + .ok_or_else(|| "Environment not found".to_string())?; + + let cluster = self + .state + .db + .get_kube_cluster(environment.cluster_id) + .await + .map_err(|e| format!("Failed to fetch cluster: {}", e))? 
+ .ok_or_else(|| "Cluster not found".to_string())?; + + Ok(Some(lapdev_devbox_rpc::DevboxEnvironmentInfo { + environment_id: env_id, + environment_name: environment.name.clone(), + cluster_name: cluster.name, + namespace: environment.namespace, + })) + } else { + Ok(None) + } + } + + async fn update_device_name( + self, + _context: tarpc::context::Context, + device_name: String, + ) -> Result<(), String> { + tracing::info!("update_device_name called for session {}", self.session_id); + + self.state + .db + .update_devbox_session_device_name(self.user_id, device_name.clone()) + .await + .map_err(|e| format!("Failed to update device name: {}", e))?; + + { + let mut sessions = self.state.active_devbox_sessions.write().await; + if let Some(entries) = sessions.get_mut(&self.user_id) { + if let Some(handle) = entries + .values_mut() + .find(|handle| handle.session_id == self.session_id) + { + handle.device_name = device_name.clone(); + } + } + } + + tracing::info!( + "Updated device name for session {} to {}", + self.session_id, + device_name + ); + + Ok(()) + } + + async fn heartbeat(self, _context: tarpc::context::Context) -> Result<(), String> { + tracing::trace!("Heartbeat received for session {}", self.session_id); + Ok(()) + } + + async fn set_active_environment( + self, + _context: tarpc::context::Context, + environment_id: Uuid, + ) -> Result<(), String> { + tracing::info!( + "set_active_environment called for session {} with environment {}", + self.session_id, + environment_id + ); + + let previous_environment = self + .state + .db + .get_active_devbox_session(self.user_id) + .await + .map_err(|e| format!("Failed to fetch session: {}", e))? + .and_then(|session| session.active_environment_id); + + let environment = self + .state + .db + .get_kube_environment(environment_id) + .await + .map_err(|e| format!("Failed to fetch environment: {}", e))? 
+ .ok_or_else(|| "Environment not found".to_string())?; + + if environment.is_shared { + return Err("Shared environments cannot be used as active devbox environments".into()); + } + + self.state + .db + .update_devbox_session_active_environment(self.user_id, Some(environment_id)) + .await + .map_err(|e| format!("Failed to update active environment: {}", e))?; + + let cluster = self + .state + .db + .get_kube_cluster(environment.cluster_id) + .await + .map_err(|e| format!("Failed to fetch cluster: {}", e))? + .ok_or_else(|| "Cluster not found".to_string())?; + + // Notify client about the environment change + let env_info = lapdev_devbox_rpc::DevboxEnvironmentInfo { + environment_id: environment.id, + environment_name: environment.name.clone(), + cluster_name: cluster.name, + namespace: environment.namespace.clone(), + }; + + // Fire and forget - don't wait for client response + let client_rpc = self.client_rpc.clone(); + tracing::info!( + "Spawning task to notify CLI about environment change to {} ({})", + env_info.cluster_name, + env_info.namespace + ); + tokio::spawn(async move { + tracing::info!("Calling environment_changed RPC on client..."); + match client_rpc + .environment_changed(tarpc::context::current(), Some(env_info.clone())) + .await + { + Ok(_) => { + tracing::info!( + "Successfully notified client of environment change to {}", + env_info.namespace + ); + } + Err(e) => { + tracing::warn!("Failed to notify client of environment change: {}", e); + } + } + }); + + let state = self.state.clone(); + let user_id = self.user_id; + tokio::spawn(async move { + state.push_devbox_routes(user_id, environment_id).await; + }); + + if let Some(prev_env) = previous_environment.filter(|prev| *prev != environment_id) { + let state = self.state.clone(); + tokio::spawn(async move { + state.clear_devbox_routes_for_environment(prev_env).await; + }); + } + + Ok(()) + } + + async fn list_workload_intercepts( + self, + _context: tarpc::context::Context, + environment_id: 
Uuid, + ) -> Result, String> { + tracing::info!( + "list_workload_intercepts called for session {} environment {}", + self.session_id, + environment_id + ); + + let session = self + .state + .db + .get_active_devbox_session(self.user_id) + .await + .map_err(|e| format!("Failed to fetch session: {}", e))? + .ok_or_else(|| "Session not found".to_string())?; + + let intercepts = self + .state + .db + .get_active_intercepts_for_environment(environment_id) + .await + .map_err(|e| format!("Failed to fetch intercepts: {}", e))?; + + let mut result = Vec::new(); + for intercept in intercepts { + let workload = self + .state + .db + .get_environment_workload(intercept.workload_id) + .await + .map_err(|e| format!("Failed to fetch workload: {}", e))? + .ok_or_else(|| "Workload not found".to_string())?; + + let port_mappings: Vec = + serde_json::from_value(intercept.port_mappings.clone()) + .map_err(|e| format!("Failed to parse port mappings: {}", e))?; + + result.push(lapdev_devbox_rpc::WorkloadInterceptInfo { + intercept_id: intercept.id, + workload_id: workload.id, + workload_name: workload.name, + namespace: workload.namespace, + port_mappings, + created_at: intercept.created_at.with_timezone(&chrono::Utc), + device_name: session.device_name.clone(), + }); + } + + Ok(result) + } + + async fn list_services( + self, + _context: tarpc::context::Context, + environment_id: Uuid, + ) -> Result, String> { + tracing::info!( + "list_services called for session {} environment {}", + self.session_id, + environment_id + ); + + // Get environment and verify user has access + let environment = self + .state + .db + .get_kube_environment(environment_id) + .await + .map_err(|e| format!("Failed to fetch environment: {}", e))? 
+ .ok_or_else(|| "Environment not found".to_string())?; + + // Verify user has access to this environment + if environment.user_id != self.user_id { + return Err("Unauthorized: You don't have access to this environment".to_string()); + } + + // Get all services for the environment + let services = self + .state + .db + .get_environment_services(environment_id) + .await + .map_err(|e| format!("Failed to fetch services: {}", e))?; + + let mut result = Vec::new(); + for service in services { + // Convert KubeServicePort to ServicePort + let ports: Vec = service + .ports + .iter() + .map(|p| lapdev_devbox_rpc::ServicePort { + name: p.name.clone(), + port: p.port as u16, + protocol: p.protocol.clone().unwrap_or_else(|| "TCP".to_string()), + }) + .collect(); + + result.push(lapdev_devbox_rpc::ServiceInfo { + name: service.name, + namespace: service.namespace, + ports, + }); + } + + Ok(result) + } + + async fn request_direct_client_config( + self, + _context: tarpc::context::Context, + environment_id: Uuid, + stun_observed_addr: Option, + ) -> Result, String> { + let environment = self + .state + .db + .get_kube_environment(environment_id) + .await + .map_err(|e| format!("Failed to fetch environment: {}", e))? 
+ .ok_or_else(|| "Environment not found".to_string())?; + + if environment.user_id != self.user_id { + return Err("Unauthorized".to_string()); + } + + match self + .state + .kube_controller + .request_devbox_direct_config( + environment.cluster_id, + self.user_id, + environment.id, + environment.namespace.clone(), + stun_observed_addr, + ) + .await + { + Ok(config) => Ok(config), + Err(err) => { + tracing::warn!( + environment_id = %environment_id, + user_id = %self.user_id, + error = %err, + "Failed to fetch direct devbox config; falling back to relay" + ); + Ok(None) + } + } + } +} + +/// Implementation for DevboxInterceptRpc - allows dashboard/CLI to control intercepts +pub struct DevboxInterceptRpcImpl { + state: Arc, + user_id: Uuid, + _organization_id: Uuid, +} + +impl DevboxInterceptRpcImpl { + pub fn new(state: Arc, user_id: Uuid, organization_id: Uuid) -> Self { + Self { + state, + user_id, + _organization_id: organization_id, + } + } +} + +impl DevboxInterceptRpc for DevboxInterceptRpcImpl { + async fn start_workload_intercept( + self, + _context: tarpc::context::Context, + workload_id: Uuid, + port_mappings: Vec, + ) -> Result { + tracing::info!( + "start_workload_intercept called for user {} workload {}", + self.user_id, + workload_id + ); + + // Get workload to find environment and validate access + let workload = self + .state + .db + .get_environment_workload(workload_id) + .await + .map_err(|e| format!("Failed to fetch workload: {}", e))? + .ok_or_else(|| "Workload not found".to_string())?; + + // Get environment to validate user has access + let environment = self + .state + .db + .get_kube_environment(workload.environment_id) + .await + .map_err(|e| format!("Failed to fetch environment: {}", e))? 
+ .ok_or_else(|| "Environment not found".to_string())?; + + if environment.is_shared { + return Err("Shared environments cannot start Devbox intercepts".to_string()); + } + + // Verify user has access to this environment + if environment.user_id != self.user_id { + return Err("Unauthorized: You don't have access to this environment".to_string()); + } + + // Convert port mapping overrides to actual port mappings + let mut actual_port_mappings = Vec::new(); + for override_mapping in &port_mappings { + let local_port = override_mapping + .local_port + .unwrap_or(override_mapping.workload_port); + actual_port_mappings.push(PortMapping { + workload_port: override_mapping.workload_port, + local_port, + protocol: "TCP".to_string(), + }); + } + + // Create intercept in database + let intercept = self + .state + .db + .create_workload_intercept( + self.user_id, + environment.id, + workload_id, + serde_json::to_value(&actual_port_mappings) + .map_err(|e| format!("Failed to serialize port mappings: {}", e))?, + ) + .await + .map_err(|e| format!("Failed to create intercept: {}", e))?; + + if self + .state + .active_devbox_sessions + .read() + .await + .get(&self.user_id) + .is_some() + { + let state = self.state.clone(); + let environment = environment.clone(); + let intercept_clone = intercept.clone(); + tokio::spawn(async move { + state + .push_devbox_route_for_intercept(environment, intercept_clone) + .await; + }); + } + + Ok(intercept.id) + } + + async fn stop_workload_intercept( + self, + _context: tarpc::context::Context, + intercept_id: Uuid, + ) -> Result<(), String> { + tracing::info!( + "stop_workload_intercept called for user {} intercept {}", + self.user_id, + intercept_id + ); + + // Get intercept to validate ownership + let intercept = self + .state + .db + .get_workload_intercept(intercept_id) + .await + .map_err(|e| format!("Failed to fetch intercept: {}", e))? 
+ .ok_or_else(|| "Intercept not found".to_string())?; + + // Verify intercept belongs to this user + if intercept.user_id != self.user_id { + return Err("Unauthorized: This intercept doesn't belong to you".to_string()); + } + + // Stop the intercept in database + self.state + .db + .stop_workload_intercept(intercept_id) + .await + .map_err(|e| format!("Failed to stop intercept: {}", e))?; + + if self + .state + .active_devbox_sessions + .read() + .await + .get(&self.user_id) + .is_some() + { + match self + .state + .db + .get_kube_environment(intercept.environment_id) + .await + { + Ok(Some(environment)) => { + let state = self.state.clone(); + let intercept_clone = intercept.clone(); + tokio::spawn(async move { + state + .clear_devbox_route_for_intercept(environment, intercept_clone) + .await; + }); + } + Ok(None) => { + tracing::warn!( + environment_id = %intercept.environment_id, + intercept_id = %intercept.id, + "Environment not found while clearing devbox route after intercept stop" + ); + } + Err(err) => { + tracing::warn!( + environment_id = %intercept.environment_id, + intercept_id = %intercept.id, + error = %err, + "Failed to load environment while clearing devbox route after intercept stop" + ); + } + } + } + + Ok(()) + } +} diff --git a/crates/api/src/devbox_auth.rs b/crates/api/src/devbox_auth.rs new file mode 100644 index 0000000..3afbcbb --- /dev/null +++ b/crates/api/src/devbox_auth.rs @@ -0,0 +1,91 @@ +use anyhow::Result; +use axum_extra::headers::{self, authorization::Bearer}; +use lapdev_db_entities::user; +use lapdev_rpc::error::ApiError; +use pasetors::claims::{Claims, ClaimsValidationRules}; +use sea_orm::{ColumnTrait, EntityTrait, QueryFilter}; +use uuid::Uuid; + +use crate::state::CoreState; + +/// Context for devbox/CLI authenticated requests +pub struct DevboxContext { + pub user: user::Model, + pub organization_id: Uuid, + pub session_id: Uuid, + pub device_name: String, + pub token_claims: Claims, +} + +impl CoreState { + /// Authenticate 
CLI request using Bearer token (PASETO) + /// Same pattern as browser session authentication - stateless! + pub async fn authenticate_bearer( + &self, + auth_header: &headers::Authorization, + ) -> Result { + let token = auth_header.token(); + + // 1. Parse and decrypt PASETO token (same as browser cookies) + let untrusted_token = pasetors::token::UntrustedToken::try_from(token) + .map_err(|_| ApiError::Unauthenticated)?; + let validated = pasetors::local::decrypt( + &self.auth_token_key, + &untrusted_token, + &ClaimsValidationRules::new(), + None, + None, + ) + .map_err(|_| ApiError::Unauthenticated)?; + + let claims = validated + .payload_claims() + .ok_or(ApiError::Unauthenticated)?; + + // 2. Extract and verify claims + let user_id: Uuid = claims + .get_claim("user_id") + .and_then(|v| serde_json::from_value(v.clone()).ok()) + .ok_or(ApiError::Unauthenticated)?; + + let organization_id: Uuid = claims + .get_claim("organization_id") + .and_then(|v| serde_json::from_value(v.clone()).ok()) + .ok_or(ApiError::Unauthenticated)?; + + let session_type: String = claims + .get_claim("session_type") + .and_then(|v| serde_json::from_value(v.clone()).ok()) + .unwrap_or_default(); + + if session_type != "cli" { + return Err(ApiError::Unauthenticated); + } + + let device_name: String = claims + .get_claim("device_name") + .and_then(|v| serde_json::from_value(v.clone()).ok()) + .ok_or(ApiError::Unauthenticated)?; + + let session_id: Uuid = claims + .get_claim("session_id") + .and_then(|v| serde_json::from_value(v.clone()).ok()) + .ok_or(ApiError::Unauthenticated)?; + + // 3. Load user (same as browser sessions - no session table lookup!) + let user = user::Entity::find_by_id(user_id) + .filter(user::Column::DeletedAt.is_null()) + .one(&self.db.conn) + .await + .map_err(|e| ApiError::InternalError(e.to_string()))? 
+ .ok_or(ApiError::Unauthenticated)?; + + Ok(DevboxContext { + user, + organization_id, + session_id, + device_name, + token_claims: claims.clone(), + }) + } +} diff --git a/crates/api/src/devbox_tunnels.rs b/crates/api/src/devbox_tunnels.rs new file mode 100644 index 0000000..48e5c4c --- /dev/null +++ b/crates/api/src/devbox_tunnels.rs @@ -0,0 +1,202 @@ +use std::{ + collections::{BTreeMap, HashMap}, + io, + sync::{ + atomic::{AtomicU64, Ordering}, + Arc, + }, +}; + +use axum::extract::ws::{CloseFrame, Message as AxumMessage, Utf8Bytes, WebSocket}; +use bytes::Bytes; +use futures::{future, Sink, SinkExt, Stream, StreamExt}; +use lapdev_tunnel::{ + relay_client_addr, relay_server_addr, run_tunnel_server_with_connector, DynTunnelStream, + RelayEndpoint, TunnelClient, TunnelError, TunnelTarget, WebSocketUdpSocket, +}; +use tokio::sync::RwLock; +use tokio_tungstenite::tungstenite::{self as ws, Message as TungMessage}; +use tracing::{info, warn}; +use uuid::Uuid; + +pub struct DevboxTunnelRegistry { + sessions_by_user: Arc>>>>, + generation_counter: AtomicU64, +} + +impl DevboxTunnelRegistry { + pub fn new() -> Self { + Self { + sessions_by_user: Arc::new(RwLock::new(HashMap::new())), + generation_counter: AtomicU64::new(1), + } + } + + fn next_generation(&self) -> u64 { + self.generation_counter.fetch_add(1, Ordering::SeqCst) + } + + pub async fn register_cli(&self, user_id: Uuid, token: String, socket: WebSocket) { + let (sink, stream) = split_axum_websocket(socket); + let udp_socket = + WebSocketUdpSocket::from_parts(sink, stream, relay_server_addr(), relay_client_addr()); + + let connection = match RelayEndpoint::udp_server_connection(udp_socket).await { + Ok(connection) => connection, + Err(err) => { + warn!( + user_id = %user_id, + error = %err, + "Failed to perform QUIC handshake for CLI tunnel" + ); + return; + } + }; + + let client = Arc::new(TunnelClient::new_relay(connection)); + let generation = self.next_generation(); + + { + let mut sessions = 
self.sessions_by_user.write().await; + let entry = sessions.entry(user_id).or_insert_with(BTreeMap::new); + if entry.insert(generation, client.clone()).is_some() { + warn!( + "Overwrote Devbox CLI tunnel generation {} for user {}", + generation, user_id + ); + } + info!( + "Registered Devbox CLI tunnel for user {} generation {}; active entries {}", + user_id, + generation, + entry.len() + ); + } + + let sessions = Arc::clone(&self.sessions_by_user); + tokio::spawn(async move { + client.closed().await; + let mut sessions = sessions.write().await; + if let Some(entry) = sessions.get_mut(&user_id) { + if entry.remove(&generation).is_some() { + info!( + "Devbox CLI tunnel closed for user {} generation {}; remaining {}", + user_id, + generation, + entry.len() + ); + if entry.is_empty() { + sessions.remove(&user_id); + } + } else { + info!( + "Skip removing Devbox CLI tunnel for user {} due to generation mismatch (requested {})", + user_id, generation + ); + } + } else { + info!( + "Devbox CLI tunnel cleanup found no user {} for generation {}", + user_id, generation + ); + } + }); + } + + pub async fn attach_sidecar( + &self, + user_id: Uuid, + socket: WebSocket, + ) -> Result<(), TunnelError> { + let cli_client = { + let sessions = self.sessions_by_user.read().await; + sessions.get(&user_id).and_then(|entry| { + entry + .values() + .rev() + .find(|client| !client.is_closed()) + .cloned() + }) + }; + + let Some(cli_client) = cli_client else { + warn!( + "Received sidecar tunnel for user {} with no active CLI tunnel", + user_id + ); + return Err(TunnelError::Remote("no active CLI tunnel".to_string())); + }; + + let (sink, stream) = split_axum_websocket(socket); + let udp_socket = + WebSocketUdpSocket::from_parts(sink, stream, relay_server_addr(), relay_client_addr()); + + let connection = RelayEndpoint::udp_server_connection(udp_socket).await?; + let connector = move |target: TunnelTarget| { + let cli_client = cli_client.clone(); + async move { + let stream = cli_client + 
.connect_tcp(target.host.clone(), target.port) + .await?; + Ok::(Box::new(stream) as DynTunnelStream) + } + }; + + run_tunnel_server_with_connector(connection, connector).await + } +} + +pub(crate) fn split_axum_websocket( + socket: WebSocket, +) -> ( + impl Sink + Send + 'static, + impl Stream> + Send + 'static, +) { + let (sink, stream) = socket.split(); + + let sink = sink + .sink_map_err(io::Error::other) + .with(|message: TungMessage| future::ready(map_tungstenite_to_axum(message))); + + let stream = stream.filter_map(|msg| { + future::ready(match msg { + Ok(AxumMessage::Binary(data)) => Some(Ok(data)), + Ok(AxumMessage::Close(_)) => None, + Ok(_) => None, + Err(err) => Some(Err(io::Error::other(err))), + }) + }); + + (sink, stream) +} + +fn map_tungstenite_to_axum(message: TungMessage) -> io::Result { + match message { + ws::Message::Text(text) => { + let bytes: Bytes = text.into(); + let utf8 = Utf8Bytes::try_from(bytes) + .map_err(|err| io::Error::new(io::ErrorKind::InvalidData, err))?; + Ok(AxumMessage::Text(utf8)) + } + ws::Message::Binary(data) => Ok(AxumMessage::Binary(data)), + ws::Message::Ping(data) => Ok(AxumMessage::Ping(data)), + ws::Message::Pong(data) => Ok(AxumMessage::Pong(data)), + ws::Message::Close(frame) => { + let mapped = frame.map(|close| CloseFrame { + code: close.code.into(), + reason: Utf8Bytes::from(close.reason.to_string()), + }); + Ok(AxumMessage::Close(mapped)) + } + ws::Message::Frame(_) => Err(io::Error::new( + io::ErrorKind::InvalidData, + "frame messages are not supported", + )), + } +} + +impl Default for DevboxTunnelRegistry { + fn default() -> Self { + Self::new() + } +} diff --git a/crates/api/src/environment_events.rs b/crates/api/src/environment_events.rs new file mode 100644 index 0000000..551e57a --- /dev/null +++ b/crates/api/src/environment_events.rs @@ -0,0 +1,181 @@ +use std::{convert::Infallible, sync::Arc, time::Duration}; + +use axum::{ + extract::{Path, State}, + response::sse::{Event, KeepAlive, Sse}, +}; 
+use axum_extra::{headers, TypedHeader}; +use chrono::{DateTime, Utc}; +use futures::{Stream, StreamExt}; +use lapdev_common::kube::{EnvironmentWorkloadStatusEvent, KubeEnvironmentStatus}; +use lapdev_rpc::error::ApiError; +use serde::{Deserialize, Serialize}; +use tokio_stream::wrappers::BroadcastStream; +use tracing::warn; +use uuid::Uuid; + +use crate::state::CoreState; + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct EnvironmentLifecycleEvent { + pub organization_id: Uuid, + pub environment_id: Uuid, + pub status: KubeEnvironmentStatus, + pub paused_at: Option, + pub resumed_at: Option, + pub updated_at: DateTime, +} + +pub async fn stream_environment_events( + Path((org_id, environment_id)): Path<(Uuid, Uuid)>, + State(state): State>, + TypedHeader(cookies): TypedHeader, +) -> Result>>, ApiError> { + let user = state.authenticate(&cookies).await?; + state + .db + .get_organization_member(user.id, org_id) + .await + .map_err(|_| ApiError::Unauthorized)?; + + let environment = state + .db + .get_kube_environment(environment_id) + .await? 
+ .ok_or_else(|| ApiError::InvalidRequest("Environment not found".to_string()))?; + + if environment.organization_id != org_id { + return Err(ApiError::Unauthorized); + } + + let receiver = state.environment_events.subscribe(); + + let target_org = org_id; + let target_env = environment_id; + + let event_stream = BroadcastStream::new(receiver).filter_map(move |result| { + let target_org = target_org; + let target_env = target_env; + async move { + match result { + Ok(event) + if event.organization_id == target_org + && event.environment_id == target_env => + { + build_sse_event(&event).map(Ok) + } + Ok(_) => None, + Err(err) => { + warn!("environment event stream lagged: {err}"); + None + } + } + } + }); + + let keep_alive = KeepAlive::new() + .interval(Duration::from_secs(15)) + .text("keep-alive"); + + Ok(Sse::new(event_stream).keep_alive(keep_alive)) +} + +pub async fn stream_organization_environment_events( + Path(org_id): Path, + State(state): State>, + TypedHeader(cookies): TypedHeader, +) -> Result>>, ApiError> { + let user = state.authenticate(&cookies).await?; + state + .db + .get_organization_member(user.id, org_id) + .await + .map_err(|_| ApiError::Unauthorized)?; + + let receiver = state.environment_events.subscribe(); + let target_org = org_id; + + let event_stream = BroadcastStream::new(receiver).filter_map(move |result| { + let target_org = target_org; + async move { + match result { + Ok(event) if event.organization_id == target_org => build_sse_event(&event).map(Ok), + Ok(_) => None, + Err(err) => { + warn!("environment event stream lagged: {err}"); + None + } + } + } + }); + + let keep_alive = KeepAlive::new() + .interval(Duration::from_secs(15)) + .text("keep-alive"); + + Ok(Sse::new(event_stream).keep_alive(keep_alive)) +} + +pub async fn stream_environment_workload_events( + Path((org_id, environment_id)): Path<(Uuid, Uuid)>, + State(state): State>, + TypedHeader(cookies): TypedHeader, +) -> Result>>, ApiError> { + let user = 
state.authenticate(&cookies).await?; + state + .db + .get_organization_member(user.id, org_id) + .await + .map_err(|_| ApiError::Unauthorized)?; + + let environment = state + .db + .get_kube_environment(environment_id) + .await? + .ok_or_else(|| ApiError::InvalidRequest("Environment not found".to_string()))?; + + if environment.organization_id != org_id { + return Err(ApiError::Unauthorized); + } + + let receiver = state.environment_workload_events.subscribe(); + let target_org = org_id; + let target_env = environment_id; + + let event_stream = BroadcastStream::new(receiver).filter_map(move |result| { + let target_org = target_org; + let target_env = target_env; + async move { + match result { + Ok(event) + if event.organization_id == target_org + && event.environment_id == target_env => + { + build_workload_sse_event(&event).map(Ok) + } + Ok(_) => None, + Err(err) => { + warn!("environment workload event stream lagged: {err}"); + None + } + } + } + }); + + let keep_alive = KeepAlive::new() + .interval(Duration::from_secs(15)) + .text("keep-alive"); + + Ok(Sse::new(event_stream).keep_alive(keep_alive)) +} + +fn build_sse_event(event: &EnvironmentLifecycleEvent) -> Option { + Event::default().event("environment").json_data(event).ok() +} + +fn build_workload_sse_event(event: &EnvironmentWorkloadStatusEvent) -> Option { + Event::default() + .event("environment_workload") + .json_data(event) + .ok() +} diff --git a/lapdev-api/src/github.rs b/crates/api/src/github.rs similarity index 100% rename from lapdev-api/src/github.rs rename to crates/api/src/github.rs diff --git a/lapdev-api/src/gitlab.rs b/crates/api/src/gitlab.rs similarity index 100% rename from lapdev-api/src/gitlab.rs rename to crates/api/src/gitlab.rs diff --git a/crates/api/src/hrpc_service.rs b/crates/api/src/hrpc_service.rs new file mode 100644 index 0000000..09ffd38 --- /dev/null +++ b/crates/api/src/hrpc_service.rs @@ -0,0 +1,1418 @@ +use axum_extra::headers; +use chrono::{DateTime, Utc}; +use 
lapdev_api_hrpc::HrpcService; +use lapdev_common::{ + devbox::{ + DevboxEnvironmentSelection, DevboxPortMapping, DevboxPortMappingOverride, + DevboxSessionSummary, DevboxSessionWhoAmI, DevboxStartWorkloadInterceptResponse, + DevboxWorkloadInterceptListResponse, DevboxWorkloadInterceptSummary, + }, + hrpc::HrpcError, + kube::{ + CreateKubeClusterResponse, KubeAppCatalog, KubeAppCatalogWorkload, + KubeAppCatalogWorkloadCreate, KubeCluster, KubeEnvironment, + KubeEnvironmentDashboardSummary, KubeEnvironmentWorkload, KubeEnvironmentWorkloadDetail, + KubeNamespace, KubeNamespaceInfo, KubeWorkload, KubeWorkloadKind, KubeWorkloadList, + PagePaginationParams, PaginatedResult, PaginationParams, + }, + UserRole, +}; +use lapdev_rpc::error::ApiError; +use pasetors::claims::Claims; +use sea_orm::DbErr; +use tarpc::context; +use uuid::Uuid; + +use crate::state::CoreState; + +impl HrpcService for CoreState { + async fn devbox_session_get_session( + &self, + headers: &axum::http::HeaderMap, + ) -> Result, HrpcError> { + let user = self.hrpc_authenticate_user(headers).await?; + let session = self + .db + .get_active_devbox_session(user.id) + .await + .map_err(hrpc_from_anyhow)?; + + let session = session.map(|session| DevboxSessionSummary { + id: session.id, + device_name: session.device_name, + token_prefix: session.token_prefix, + active_environment_id: session.active_environment_id, + created_at: session.created_at.with_timezone(&Utc), + expires_at: session.expires_at.with_timezone(&Utc), + last_used_at: session.last_used_at.with_timezone(&Utc), + revoked_at: session.revoked_at.map(|ts| ts.with_timezone(&Utc)), + }); + + Ok(session) + } + + async fn devbox_session_revoke_session( + &self, + headers: &axum::http::HeaderMap, + session_id: Uuid, + ) -> Result<(), HrpcError> { + let user = self.hrpc_authenticate_user(headers).await?; + let session = self + .db + .get_devbox_session_by_id(session_id) + .await + .map_err(hrpc_from_anyhow)? 
+ .ok_or_else(|| hrpc_error("Session not found"))?; + + if session.user_id != user.id { + return Err(hrpc_error("Unauthorized")); + } + + if session.revoked_at.is_some() { + return Ok(()); + } + + self.db + .revoke_devbox_session_by_id(session_id) + .await + .map_err(hrpc_from_anyhow)?; + + let session_notification = { + let sessions = self.active_devbox_sessions.read().await; + sessions.get(&user.id).and_then(|entries| { + entries + .iter() + .find(|(_, handle)| handle.session_id == session_id) + .map(|(connection_id, handle)| (*connection_id, handle.rpc_client.clone())) + }) + }; + + if let Some((connection_id, client)) = session_notification { + { + let mut sessions = self.active_devbox_sessions.write().await; + if let Some(entries) = sessions.get_mut(&user.id) { + entries.remove(&connection_id); + if entries.is_empty() { + sessions.remove(&user.id); + } + } + } + tokio::spawn(async move { + let _ = client + .session_displaced( + context::current(), + "Session revoked by dashboard".to_string(), + ) + .await; + }); + } + + Ok(()) + } + + async fn devbox_session_get_active_environment( + &self, + headers: &axum::http::HeaderMap, + ) -> Result, HrpcError> { + let ctx = self.hrpc_resolve_active_devbox_session(headers).await?; + + let Some(environment_id) = ctx.session.active_environment_id else { + return Ok(None); + }; + + let environment = self + .db + .get_kube_environment(environment_id) + .await + .map_err(hrpc_from_db_err)?; + + let Some(environment) = environment else { + return Ok(None); + }; + + let cluster = self + .db + .get_kube_cluster(environment.cluster_id) + .await + .map_err(hrpc_from_anyhow)? 
+ .ok_or_else(|| hrpc_error("Cluster not found"))?; + + Ok(Some(DevboxEnvironmentSelection { + environment_id, + cluster_name: cluster.name, + namespace: environment.namespace, + })) + } + + async fn devbox_session_set_active_environment( + &self, + headers: &axum::http::HeaderMap, + environment_id: Uuid, + ) -> Result<(), HrpcError> { + let ctx = self.hrpc_resolve_active_devbox_session(headers).await?; + let previous_environment = ctx.session.active_environment_id; + let environment = self + .db + .get_kube_environment(environment_id) + .await + .map_err(hrpc_from_db_err)? + .ok_or_else(|| hrpc_error("Environment not found"))?; + + if environment.is_shared { + return Err(hrpc_error( + "Shared environments cannot be used as active devbox environments", + )); + } + + self.ensure_environment_access(&ctx.user, &environment) + .await?; + + self.db + .update_devbox_session_active_environment(ctx.session.user_id, Some(environment_id)) + .await + .map_err(hrpc_from_anyhow)?; + + // Notify CLI about the environment change + let cluster = self + .db + .get_kube_cluster(environment.cluster_id) + .await + .map_err(hrpc_from_anyhow)? 
+ .ok_or_else(|| hrpc_error("Cluster not found"))?; + + let env_info = lapdev_devbox_rpc::DevboxEnvironmentInfo { + environment_id: environment.id, + environment_name: environment.name.clone(), + cluster_name: cluster.name.clone(), + namespace: environment.namespace.clone(), + }; + + tracing::info!( + "set_active_environment (HRPC) called for session {} with environment {} ({}/{})", + ctx.session.id, + environment_id, + cluster.name, + environment.namespace + ); + + // Get the RPC client for the active session + let rpc_client = { + let sessions = self.active_devbox_sessions.read().await; + sessions.get(&ctx.user.id).and_then(|entries| { + entries + .values() + .next_back() + .map(|handle| handle.rpc_client.clone()) + }) + }; + + if let Some(client) = rpc_client { + tracing::info!( + "Spawning task to notify CLI about environment change to {} ({})", + env_info.cluster_name, + env_info.namespace + ); + tokio::spawn(async move { + tracing::info!("Calling environment_changed RPC on client..."); + match client + .environment_changed(context::current(), Some(env_info.clone())) + .await + { + Ok(_) => { + tracing::info!( + "Successfully notified client of environment change to {}", + env_info.namespace + ); + } + Err(e) => { + tracing::warn!("Failed to notify client of environment change: {}", e); + } + } + }); + } else { + tracing::warn!( + "No active WebSocket connection for user {}, environment change saved but CLI not notified", + ctx.user.id + ); + } + + let state = self.clone(); + let user_id = ctx.user.id; + tokio::spawn(async move { + state.push_devbox_routes(user_id, environment_id).await; + }); + + if let Some(prev_env) = previous_environment.filter(|prev| *prev != environment_id) { + let state = self.clone(); + tokio::spawn(async move { + state.clear_devbox_routes_for_environment(prev_env).await; + }); + } + + Ok(()) + } + + async fn devbox_session_whoami( + &self, + headers: &axum::http::HeaderMap, + ) -> Result { + let ctx = 
self.hrpc_resolve_active_devbox_session(headers).await?; + + Ok(DevboxSessionWhoAmI { + user_id: ctx.user.id, + email: ctx.user.email.clone(), + device_name: ctx.session.device_name, + authenticated_at: ctx.authenticated_at, + expires_at: ctx.expires_at, + }) + } + + async fn devbox_intercept_list( + &self, + headers: &axum::http::HeaderMap, + environment_id: Uuid, + ) -> Result { + let environment = self + .db + .get_kube_environment(environment_id) + .await + .map_err(hrpc_from_db_err)? + .ok_or_else(|| hrpc_error("Environment not found"))?; + + let user = self + .authorize(headers, environment.organization_id, None) + .await + .map_err(hrpc_from_api_error)?; + + self.ensure_environment_access(&user, &environment).await?; + + let intercepts = self + .db + .list_workload_intercepts_for_environment(environment_id) + .await + .map_err(hrpc_from_anyhow)?; + + let mut results = Vec::with_capacity(intercepts.len()); + for intercept in intercepts { + let workload = self + .db + .get_environment_workload(intercept.workload_id) + .await + .map_err(hrpc_from_anyhow)? 
+ .ok_or_else(|| hrpc_error("Workload not found"))?; + + let port_mappings: Vec = + serde_json::from_value(intercept.port_mappings.clone()) + .map_err(hrpc_from_anyhow)?; + + results.push(DevboxWorkloadInterceptSummary { + intercept_id: intercept.id, + session_id: Uuid::nil(), // Intercepts are no longer tied to sessions + workload_id: workload.id, + workload_name: workload.name, + namespace: workload.namespace, + port_mappings, + created_at: intercept.created_at.with_timezone(&Utc), + restored_at: intercept.stopped_at.map(|dt| dt.with_timezone(&Utc)), + }); + } + + Ok(DevboxWorkloadInterceptListResponse { + intercepts: results, + }) + } + + async fn devbox_intercept_start( + &self, + headers: &axum::http::HeaderMap, + workload_id: Uuid, + port_mappings: Vec, + ) -> Result { + let user = self.hrpc_authenticate_user(headers).await?; + + let workload = self + .db + .get_environment_workload(workload_id) + .await + .map_err(hrpc_from_anyhow)? + .ok_or_else(|| hrpc_error("Workload not found"))?; + + let environment = self + .db + .get_kube_environment(workload.environment_id) + .await + .map_err(hrpc_from_db_err)? 
+ .ok_or_else(|| hrpc_error("Environment not found"))?; + + if environment.is_shared { + return Err(hrpc_error( + "Shared environments cannot start Devbox intercepts", + )); + } + + self.ensure_environment_access(&user, &environment).await?; + + let mappings = port_mappings + .into_iter() + .map(|override_mapping| DevboxPortMapping { + workload_port: override_mapping.workload_port, + local_port: override_mapping + .local_port + .unwrap_or(override_mapping.workload_port), + protocol: "TCP".to_string(), + }) + .collect::>(); + + let intercept = self + .db + .create_workload_intercept( + user.id, + environment.id, + workload_id, + serde_json::to_value(&mappings).map_err(hrpc_from_anyhow)?, + ) + .await + .map_err(hrpc_from_anyhow)?; + + if self + .active_devbox_sessions + .read() + .await + .get(&user.id) + .is_some() + { + let state = self.clone(); + let environment = environment.clone(); + let intercept_clone = intercept.clone(); + tokio::spawn(async move { + state + .push_devbox_route_for_intercept(environment, intercept_clone) + .await; + }); + } + + Ok(DevboxStartWorkloadInterceptResponse { + intercept_id: intercept.id, + }) + } + + async fn devbox_intercept_stop( + &self, + headers: &axum::http::HeaderMap, + intercept_id: Uuid, + ) -> Result<(), HrpcError> { + let user = self.hrpc_authenticate_user(headers).await?; + + let intercept = self + .db + .get_workload_intercept(intercept_id) + .await + .map_err(hrpc_from_anyhow)? 
+ .ok_or_else(|| hrpc_error("Intercept not found"))?; + + if intercept.user_id != user.id { + return Err(hrpc_error("Unauthorized")); + } + + self.db + .stop_workload_intercept(intercept_id) + .await + .map_err(hrpc_from_anyhow)?; + if self + .active_devbox_sessions + .read() + .await + .get(&user.id) + .is_some() + { + match self.db.get_kube_environment(intercept.environment_id).await { + Ok(Some(environment)) => { + let state = self.clone(); + let intercept_clone = intercept.clone(); + tokio::spawn(async move { + state + .clear_devbox_route_for_intercept(environment, intercept_clone) + .await; + }); + } + Ok(None) => { + tracing::warn!( + environment_id = %intercept.environment_id, + intercept_id = %intercept.id, + "Environment not found while clearing devbox route after intercept stop" + ); + } + Err(err) => { + tracing::warn!( + environment_id = %intercept.environment_id, + intercept_id = %intercept.id, + error = %err, + "Failed to load environment while clearing devbox route after intercept stop" + ); + } + } + } + + Ok(()) + } + + async fn all_kube_clusters( + &self, + headers: &axum::http::HeaderMap, + org_id: Uuid, + ) -> Result, HrpcError> { + let _ = self.authorize(headers, org_id, None).await?; + self.kube_controller + .get_all_kube_clusters(org_id) + .await + .map_err(HrpcError::from) + } + + async fn create_kube_cluster( + &self, + headers: &axum::http::HeaderMap, + org_id: Uuid, + name: String, + ) -> Result { + let user = self + .authorize(headers, org_id, Some(UserRole::Admin)) + .await?; + + self.kube_controller + .create_kube_cluster(org_id, user.id, name) + .await + .map_err(HrpcError::from) + } + + async fn delete_kube_cluster( + &self, + headers: &axum::http::HeaderMap, + org_id: Uuid, + cluster_id: Uuid, + ) -> Result<(), HrpcError> { + let _ = self + .authorize(headers, org_id, Some(UserRole::Admin)) + .await?; + + self.kube_controller + .delete_kube_cluster(org_id, cluster_id) + .await + .map_err(HrpcError::from) + } + + async fn 
set_cluster_deployable( + &self, + headers: &axum::http::HeaderMap, + org_id: Uuid, + cluster_id: Uuid, + can_deploy_personal: bool, + can_deploy_shared: bool, + ) -> Result<(), HrpcError> { + let _ = self + .authorize(headers, org_id, Some(UserRole::Admin)) + .await?; + + self.kube_controller + .set_cluster_deployable(org_id, cluster_id, can_deploy_personal, can_deploy_shared) + .await + .map_err(HrpcError::from) + } + + async fn set_cluster_personal_deployable( + &self, + headers: &axum::http::HeaderMap, + org_id: Uuid, + cluster_id: Uuid, + can_deploy: bool, + ) -> Result<(), HrpcError> { + let _ = self + .authorize(headers, org_id, Some(UserRole::Admin)) + .await?; + + self.kube_controller + .set_cluster_personal_deployable(org_id, cluster_id, can_deploy) + .await + .map_err(HrpcError::from) + } + + async fn set_cluster_shared_deployable( + &self, + headers: &axum::http::HeaderMap, + org_id: Uuid, + cluster_id: Uuid, + can_deploy: bool, + ) -> Result<(), HrpcError> { + let _ = self + .authorize(headers, org_id, Some(UserRole::Admin)) + .await?; + + self.kube_controller + .set_cluster_shared_deployable(org_id, cluster_id, can_deploy) + .await + .map_err(HrpcError::from) + } + + async fn get_workloads( + &self, + headers: &axum::http::HeaderMap, + org_id: Uuid, + cluster_id: Uuid, + namespace: Option, + workload_kind_filter: Option, + include_system_workloads: bool, + pagination: Option, + ) -> Result { + let _ = self.authorize(headers, org_id, None).await?; + + self.kube_controller + .get_workloads( + org_id, + cluster_id, + namespace, + workload_kind_filter, + include_system_workloads, + pagination, + ) + .await + .map_err(HrpcError::from) + } + + async fn get_workload_details( + &self, + headers: &axum::http::HeaderMap, + org_id: Uuid, + cluster_id: Uuid, + name: String, + namespace: String, + ) -> Result, HrpcError> { + let _ = self.authorize(headers, org_id, None).await?; + + self.kube_controller + .get_workload_details(org_id, cluster_id, name, namespace) + 
.await + .map_err(HrpcError::from) + } + + async fn get_cluster_namespaces( + &self, + headers: &axum::http::HeaderMap, + org_id: Uuid, + cluster_id: Uuid, + ) -> Result, HrpcError> { + let _ = self.authorize(headers, org_id, None).await?; + + self.kube_controller + .get_cluster_namespaces(org_id, cluster_id) + .await + .map_err(HrpcError::from) + } + + async fn get_cluster_info( + &self, + headers: &axum::http::HeaderMap, + org_id: Uuid, + cluster_id: Uuid, + ) -> Result { + let _ = self.authorize(headers, org_id, None).await?; + + self.kube_controller + .get_cluster_info(org_id, cluster_id) + .await + .map_err(HrpcError::from) + } + + async fn create_app_catalog( + &self, + headers: &axum::http::HeaderMap, + org_id: Uuid, + cluster_id: Uuid, + name: String, + description: Option, + workloads: Vec, + ) -> Result { + let user = self + .authorize(headers, org_id, Some(UserRole::Admin)) + .await?; + + self.kube_controller + .create_app_catalog(org_id, user.id, cluster_id, name, description, workloads) + .await + .map_err(HrpcError::from) + } + + async fn all_app_catalogs( + &self, + headers: &axum::http::HeaderMap, + org_id: Uuid, + search: Option, + pagination: Option, + ) -> Result, HrpcError> { + let _ = self.authorize(headers, org_id, None).await?; + + self.kube_controller + .get_all_app_catalogs(org_id, search, pagination) + .await + .map_err(HrpcError::from) + } + + async fn get_app_catalog( + &self, + headers: &axum::http::HeaderMap, + org_id: Uuid, + catalog_id: Uuid, + ) -> Result { + let _ = self.authorize(headers, org_id, None).await?; + + self.kube_controller + .get_app_catalog(org_id, catalog_id) + .await + .map_err(HrpcError::from) + } + + async fn get_app_catalog_workloads( + &self, + headers: &axum::http::HeaderMap, + org_id: Uuid, + catalog_id: Uuid, + ) -> Result, HrpcError> { + let _ = self.authorize(headers, org_id, None).await?; + + self.kube_controller + .get_app_catalog_workloads(org_id, catalog_id) + .await + .map_err(HrpcError::from) + } + + 
async fn all_kube_environments( + &self, + headers: &axum::http::HeaderMap, + org_id: Uuid, + search: Option, + is_shared: bool, + is_branch: bool, + pagination: Option, + ) -> Result, HrpcError> { + let user = self.authorize(headers, org_id, None).await?; + + self.kube_controller + .get_all_kube_environments(org_id, user.id, search, is_shared, is_branch, pagination) + .await + .map_err(HrpcError::from) + } + + async fn get_environment_dashboard_summary( + &self, + headers: &axum::http::HeaderMap, + org_id: Uuid, + recent_limit: Option, + ) -> Result { + let user = self.authorize(headers, org_id, None).await?; + + self.kube_controller + .get_environment_dashboard_summary(org_id, user.id, recent_limit) + .await + .map_err(HrpcError::from) + } + + async fn get_kube_environment( + &self, + headers: &axum::http::HeaderMap, + org_id: Uuid, + environment_id: Uuid, + ) -> Result { + let user = self.authorize(headers, org_id, None).await?; + self.kube_controller + .get_kube_environment(org_id, user.id, environment_id) + .await + .map_err(HrpcError::from) + } + + async fn delete_kube_environment( + &self, + headers: &axum::http::HeaderMap, + org_id: Uuid, + environment_id: Uuid, + ) -> Result<(), HrpcError> { + let environment = self + .db + .get_kube_environment(environment_id) + .await + .map_err(hrpc_from_db_err)? 
+ .ok_or_else(|| hrpc_error("Environment not found"))?; + + if environment.organization_id != org_id { + return Err(hrpc_error( + "Environment does not belong to the organization", + )); + } + + let required_role = environment.is_shared.then_some(UserRole::Admin); + let user = self.authorize(headers, org_id, required_role).await?; + + if !environment.is_shared && environment.user_id != user.id { + return Err(hrpc_error("Unauthorized")); + } + + self.kube_controller + .delete_kube_environment(org_id, user.id, environment_id) + .await + .map_err(HrpcError::from) + } + + async fn pause_kube_environment( + &self, + headers: &axum::http::HeaderMap, + org_id: Uuid, + environment_id: Uuid, + ) -> Result<(), HrpcError> { + let environment = self + .db + .get_kube_environment(environment_id) + .await + .map_err(hrpc_from_db_err)? + .ok_or_else(|| hrpc_error("Environment not found"))?; + + if environment.organization_id != org_id { + return Err(hrpc_error( + "Environment does not belong to the organization", + )); + } + + if environment.is_shared { + return Err(hrpc_error( + "Pause is not yet supported for shared environments", + )); + } + + let user = self.authorize(headers, org_id, None).await?; + + if environment.user_id != user.id { + return Err(hrpc_error("Unauthorized")); + } + self.kube_controller + .pause_kube_environment(org_id, user.id, environment_id) + .await + .map_err(HrpcError::from) + } + + async fn resume_kube_environment( + &self, + headers: &axum::http::HeaderMap, + org_id: Uuid, + environment_id: Uuid, + ) -> Result<(), HrpcError> { + let environment = self + .db + .get_kube_environment(environment_id) + .await + .map_err(hrpc_from_db_err)? 
+ .ok_or_else(|| hrpc_error("Environment not found"))?; + + if environment.organization_id != org_id { + return Err(hrpc_error( + "Environment does not belong to the organization", + )); + } + + if environment.is_shared { + return Err(hrpc_error( + "Resume is not yet supported for shared environments", + )); + } + + let user = self.authorize(headers, org_id, None).await?; + + if environment.user_id != user.id { + return Err(hrpc_error("Unauthorized")); + } + self.kube_controller + .resume_kube_environment(org_id, user.id, environment_id) + .await + .map_err(HrpcError::from) + } + + async fn sync_environment_from_catalog( + &self, + headers: &axum::http::HeaderMap, + org_id: Uuid, + environment_id: Uuid, + ) -> Result<(), HrpcError> { + let user = self.authorize(headers, org_id, None).await?; + self.kube_controller + .sync_environment_from_catalog(org_id, user.id, environment_id) + .await + .map_err(HrpcError::from) + } + + async fn rebase_branch_environment( + &self, + headers: &axum::http::HeaderMap, + org_id: Uuid, + environment_id: Uuid, + ) -> Result<(), HrpcError> { + let user = self.authorize(headers, org_id, None).await?; + self.kube_controller + .rebase_branch_environment(org_id, user.id, environment_id) + .await + .map_err(HrpcError::from) + } + + async fn delete_app_catalog( + &self, + headers: &axum::http::HeaderMap, + org_id: Uuid, + catalog_id: Uuid, + ) -> Result<(), HrpcError> { + let _ = self + .authorize(headers, org_id, Some(UserRole::Admin)) + .await?; + + self.kube_controller + .delete_app_catalog(org_id, catalog_id) + .await + .map_err(HrpcError::from) + } + + async fn delete_app_catalog_workload( + &self, + headers: &axum::http::HeaderMap, + org_id: Uuid, + workload_id: Uuid, + ) -> Result<(), HrpcError> { + let _ = self + .authorize(headers, org_id, Some(UserRole::Admin)) + .await?; + + self.kube_controller + .delete_app_catalog_workload(org_id, workload_id) + .await + .map_err(HrpcError::from) + } + + async fn update_app_catalog_workload( + 
&self, + headers: &axum::http::HeaderMap, + org_id: Uuid, + workload_id: Uuid, + containers: Vec, + ) -> Result<(), HrpcError> { + let _ = self + .authorize(headers, org_id, Some(UserRole::Admin)) + .await?; + + self.kube_controller + .update_app_catalog_workload(org_id, workload_id, containers) + .await + .map_err(HrpcError::from) + } + + async fn add_workloads_to_app_catalog( + &self, + headers: &axum::http::HeaderMap, + org_id: Uuid, + catalog_id: Uuid, + workloads: Vec, + ) -> Result<(), HrpcError> { + let user = self + .authorize(headers, org_id, Some(UserRole::Admin)) + .await?; + + self.kube_controller + .add_workloads_to_app_catalog(org_id, user.id, catalog_id, workloads) + .await + .map_err(HrpcError::from) + } + + async fn create_kube_environment( + &self, + headers: &axum::http::HeaderMap, + org_id: Uuid, + app_catalog_id: Uuid, + cluster_id: Uuid, + name: String, + is_shared: bool, + ) -> Result { + let required_role = is_shared.then_some(UserRole::Admin); + let user = self.authorize(headers, org_id, required_role).await?; + + self.kube_controller + .create_kube_environment(org_id, user.id, app_catalog_id, cluster_id, name, is_shared) + .await + .map_err(HrpcError::from) + } + + async fn create_branch_environment( + &self, + headers: &axum::http::HeaderMap, + org_id: Uuid, + base_environment_id: Uuid, + name: String, + ) -> Result { + let user = self.authorize(headers, org_id, None).await?; + + self.kube_controller + .create_branch_environment(org_id, user.id, base_environment_id, name) + .await + .map_err(HrpcError::from) + } + + async fn get_environment_workloads( + &self, + headers: &axum::http::HeaderMap, + org_id: Uuid, + environment_id: Uuid, + ) -> Result, HrpcError> { + let user = self.authorize(headers, org_id, None).await?; + self.kube_controller + .get_environment_workloads(org_id, user.id, environment_id) + .await + .map_err(HrpcError::from) + } + + async fn get_environment_services( + &self, + headers: &axum::http::HeaderMap, + org_id: 
Uuid, + environment_id: Uuid, + ) -> Result, HrpcError> { + let user = self.authorize(headers, org_id, None).await?; + self.kube_controller + .get_environment_services(org_id, user.id, environment_id) + .await + .map_err(HrpcError::from) + } + + async fn get_environment_workload( + &self, + headers: &axum::http::HeaderMap, + org_id: Uuid, + workload_id: Uuid, + ) -> Result, HrpcError> { + let user = self.authorize(headers, org_id, None).await?; + self.kube_controller + .get_environment_workload(org_id, user.id, workload_id) + .await + .map_err(HrpcError::from) + } + + async fn get_environment_workload_detail( + &self, + headers: &axum::http::HeaderMap, + org_id: Uuid, + environment_id: Uuid, + workload_id: Uuid, + ) -> Result { + let user = self.authorize(headers, org_id, None).await?; + self.kube_controller + .get_environment_workload_detail(org_id, user.id, environment_id, workload_id) + .await + .map_err(HrpcError::from) + } + + async fn update_environment_workload( + &self, + headers: &axum::http::HeaderMap, + org_id: Uuid, + environment_id: Uuid, + workload_id: Uuid, + containers: Vec, + ) -> Result { + let workload = self + .db + .get_environment_workload(workload_id) + .await + .map_err(|e| HrpcError::from(ApiError::from(e)))? + .ok_or_else(|| { + HrpcError::from(ApiError::InvalidRequest("Workload not found".to_string())) + })?; + + let target_environment = self + .db + .get_kube_environment(environment_id) + .await + .map_err(|e| HrpcError::from(ApiError::from(e)))? 
+ .ok_or_else(|| { + HrpcError::from(ApiError::InvalidRequest( + "Environment not found".to_string(), + )) + })?; + + if target_environment.organization_id != org_id { + return Err(HrpcError::from(ApiError::Unauthorized)); + } + + // Ensure the workload belongs to this environment or its base + if workload.environment_id != target_environment.id { + let base_env = target_environment.base_environment_id.ok_or_else(|| { + // Branch workloads reference their base shared environment; if there's no base we can't continue + HrpcError::from(ApiError::InvalidRequest( + "Workload does not belong to the target environment".to_string(), + )) + })?; + if workload.environment_id != base_env { + return Err(HrpcError::from(ApiError::InvalidRequest( + "Workload does not belong to the target environment".to_string(), + ))); + } + } + + // Determine required role based on the target environment. Branch workloads inherit permissions from their branch. + let required_role = target_environment.is_shared.then_some(UserRole::Admin); + + let user = self.authorize(headers, org_id, required_role).await?; + + if !target_environment.is_shared && target_environment.user_id != user.id { + return Err(HrpcError::from(ApiError::Unauthorized)); + } + + self.kube_controller + .update_environment_workload( + org_id, + user.id, + workload_id, + containers, + target_environment, + ) + .await + .map_err(HrpcError::from) + } + + async fn delete_environment_workload( + &self, + headers: &axum::http::HeaderMap, + org_id: Uuid, + environment_id: Uuid, + workload_id: Uuid, + ) -> Result<(), HrpcError> { + let user = self.authorize(headers, org_id, None).await?; + self.kube_controller + .delete_environment_workload(org_id, user.id, environment_id, workload_id) + .await + .map_err(HrpcError::from) + } + + async fn create_kube_namespace( + &self, + headers: &axum::http::HeaderMap, + org_id: Uuid, + name: String, + description: Option, + is_shared: bool, + ) -> Result { + // Only admin can create shared 
namespaces, regular users can create personal ones + let required_role = if is_shared { + Some(UserRole::Admin) + } else { + None + }; + let user = self.authorize(headers, org_id, required_role).await?; + + self.kube_controller + .create_kube_namespace(org_id, user.id, name, description, is_shared) + .await + .map_err(HrpcError::from) + } + + async fn all_kube_namespaces( + &self, + headers: &axum::http::HeaderMap, + org_id: Uuid, + is_shared: bool, + ) -> Result, HrpcError> { + let user = self.authorize(headers, org_id, None).await?; + + self.kube_controller + .get_all_kube_namespaces(org_id, user.id, is_shared) + .await + .map_err(HrpcError::from) + } + + async fn delete_kube_namespace( + &self, + headers: &axum::http::HeaderMap, + org_id: Uuid, + namespace_id: Uuid, + ) -> Result<(), HrpcError> { + // Get the namespace first to check if it's shared + let namespace = self + .kube_controller + .db + .get_kube_namespace(namespace_id) + .await + .map_err(ApiError::from)? + .ok_or_else(|| ApiError::InvalidRequest("Namespace not found".to_string()))?; + + if namespace.organization_id != org_id { + return Err(ApiError::Unauthorized)?; + } + + // For shared namespaces, only admin can delete + // For personal namespaces, any user can delete (with ownership check below) + let required_role = if namespace.is_shared { + Some(UserRole::Admin) + } else { + None + }; + let user = self.authorize(headers, org_id, required_role).await?; + + // For personal namespaces, only the creator can delete + if !namespace.is_shared && namespace.user_id != user.id { + return Err(ApiError::InvalidRequest( + "You can only delete namespaces you created".to_string(), + ))?; + } + + self.kube_controller + .delete_kube_namespace(org_id, namespace_id) + .await + .map_err(HrpcError::from) + } + + async fn create_environment_preview_url( + &self, + headers: &axum::http::HeaderMap, + org_id: Uuid, + environment_id: Uuid, + request: lapdev_common::kube::CreateKubeEnvironmentPreviewUrlRequest, + ) -> 
Result { + let environment = self + .db + .get_kube_environment(environment_id) + .await + .map_err(hrpc_from_db_err)? + .ok_or_else(|| hrpc_error("Environment not found"))?; + + if environment.organization_id != org_id { + return Err(hrpc_error( + "Environment does not belong to the organization", + )); + } + + let required_role = environment.is_shared.then_some(UserRole::Admin); + let user = self.authorize(headers, org_id, required_role).await?; + + if !environment.is_shared && environment.user_id != user.id { + return Err(hrpc_error("Unauthorized")); + } + + self.kube_controller + .create_environment_preview_url(user.id, environment, request) + .await + .map_err(HrpcError::from) + } + + async fn get_environment_preview_urls( + &self, + headers: &axum::http::HeaderMap, + org_id: Uuid, + environment_id: Uuid, + ) -> Result, HrpcError> { + let user = self.authorize(headers, org_id, None).await?; + self.kube_controller + .get_environment_preview_urls(org_id, user.id, environment_id) + .await + .map_err(HrpcError::from) + } + + async fn update_environment_preview_url( + &self, + headers: &axum::http::HeaderMap, + org_id: Uuid, + preview_url_id: Uuid, + request: lapdev_common::kube::UpdateKubeEnvironmentPreviewUrlRequest, + ) -> Result { + let preview_url = self + .db + .get_environment_preview_url(preview_url_id) + .await + .map_err(hrpc_from_db_err)? + .ok_or_else(|| hrpc_error("Preview URL not found"))?; + + let environment = self + .db + .get_kube_environment(preview_url.environment_id) + .await + .map_err(hrpc_from_db_err)? 
+ .ok_or_else(|| hrpc_error("Environment not found"))?; + + if environment.organization_id != org_id { + return Err(hrpc_error( + "Environment does not belong to the organization", + )); + } + + let required_role = environment.is_shared.then_some(UserRole::Admin); + let user = self.authorize(headers, org_id, required_role).await?; + + if !environment.is_shared && environment.user_id != user.id { + return Err(hrpc_error("Unauthorized")); + } + + self.kube_controller + .update_environment_preview_url(preview_url_id, request) + .await + .map_err(HrpcError::from) + } + + async fn delete_environment_preview_url( + &self, + headers: &axum::http::HeaderMap, + org_id: Uuid, + preview_url_id: Uuid, + ) -> Result<(), HrpcError> { + let preview_url = self + .db + .get_environment_preview_url(preview_url_id) + .await + .map_err(hrpc_from_db_err)? + .ok_or_else(|| hrpc_error("Preview URL not found"))?; + + let environment = self + .db + .get_kube_environment(preview_url.environment_id) + .await + .map_err(hrpc_from_db_err)? 
+ .ok_or_else(|| hrpc_error("Environment not found"))?; + + if environment.organization_id != org_id { + return Err(hrpc_error( + "Environment does not belong to the organization", + )); + } + + let required_role = environment.is_shared.then_some(UserRole::Admin); + let user = self.authorize(headers, org_id, required_role).await?; + + if !environment.is_shared && environment.user_id != user.id { + return Err(hrpc_error("Unauthorized")); + } + + self.kube_controller + .delete_environment_preview_url(preview_url_id) + .await + .map_err(HrpcError::from) + } +} + +struct ActiveDevboxSessionContext { + user: lapdev_db_entities::user::Model, + session: lapdev_db_entities::kube_devbox_session::Model, + authenticated_at: Option>, + expires_at: Option>, +} + +impl CoreState { + async fn hrpc_authenticate_user( + &self, + headers: &axum::http::HeaderMap, + ) -> Result { + self.authenticate_raw(headers) + .await + .map_err(hrpc_from_api_error) + } + + async fn hrpc_resolve_active_devbox_session( + &self, + headers: &axum::http::HeaderMap, + ) -> Result { + if let Some(value) = headers.get(axum::http::header::AUTHORIZATION) { + let value = value + .to_str() + .map_err(|_| hrpc_error("Invalid Authorization header"))?; + + if let Some(token) = value.strip_prefix("Bearer ") { + let bearer = headers::Authorization::bearer(token) + .map_err(|_| hrpc_error("Invalid bearer token"))?; + let ctx = self + .authenticate_bearer(&bearer) + .await + .map_err(hrpc_from_api_error)?; + + let session = self + .db + .get_devbox_session_by_token_hash(token) + .await + .map_err(hrpc_from_anyhow)? 
+ .ok_or_else(|| hrpc_error("Active session not found"))?; + + self.db + .update_devbox_session_last_used(session.user_id) + .await + .map_err(hrpc_from_anyhow)?; + + return Ok(ActiveDevboxSessionContext { + user: ctx.user, + session, + authenticated_at: parse_claim_datetime(&ctx.token_claims, "iat"), + expires_at: parse_claim_datetime(&ctx.token_claims, "exp"), + }); + } + } + + let user = self.hrpc_authenticate_user(headers).await?; + let session = self + .db + .get_active_devbox_session(user.id) + .await + .map_err(hrpc_from_anyhow)? + .ok_or_else(|| hrpc_error("No active devbox session"))?; + + self.db + .update_devbox_session_last_used(session.user_id) + .await + .map_err(hrpc_from_anyhow)?; + + Ok(ActiveDevboxSessionContext { + authenticated_at: Some(session.created_at.with_timezone(&Utc)), + expires_at: Some(session.expires_at.with_timezone(&Utc)), + user, + session, + }) + } + + async fn ensure_environment_access( + &self, + user: &lapdev_db_entities::user::Model, + environment: &lapdev_db_entities::kube_environment::Model, + ) -> Result<(), HrpcError> { + if environment.user_id == user.id { + return Ok(()); + } + + if environment.is_shared { + self.db + .get_organization_member(user.id, environment.organization_id) + .await + .map(|_| ()) + .map_err(|_| hrpc_error("Unauthorized")) + } else { + Err(hrpc_error("Unauthorized")) + } + } +} + +fn hrpc_error(err: E) -> HrpcError { + HrpcError { + error: err.to_string(), + } +} + +fn hrpc_from_anyhow(err: E) -> HrpcError { + hrpc_error(err) +} + +fn hrpc_from_api_error(err: ApiError) -> HrpcError { + hrpc_error(err) +} + +fn hrpc_from_db_err(err: DbErr) -> HrpcError { + hrpc_error(err) +} + +fn parse_claim_datetime(claims: &Claims, key: &str) -> Option> { + claims + .get_claim(key) + .and_then(|value| serde_json::from_value::(value.clone()).ok()) + .and_then(|value| chrono::DateTime::parse_from_rfc3339(&value).ok()) + .map(|dt| dt.with_timezone(&Utc)) +} diff --git a/crates/api/src/kube.rs 
b/crates/api/src/kube.rs new file mode 100644 index 0000000..a7589b8 --- /dev/null +++ b/crates/api/src/kube.rs @@ -0,0 +1,405 @@ +use std::{net::SocketAddr, sync::Arc}; + +use axum::{ + extract::{ws::WebSocket, Path, State, WebSocketUpgrade}, + http::HeaderMap, + response::Response, +}; +use futures::{future::BoxFuture, StreamExt}; +use lapdev_common::{ + devbox::DirectTunnelConfig, + kube::{KUBE_CLUSTER_TOKEN_HEADER, KUBE_ENVIRONMENT_TOKEN_HEADER}, + token::HashedToken, +}; +use lapdev_kube::server::{DirectConfigProvider, KubeClusterServer}; +use lapdev_kube_rpc::{KubeClusterRpc, KubeManagerRpcClient}; +use lapdev_rpc::{error::ApiError, spawn_twoway}; +use lapdev_tunnel::{ + relay_client_addr, relay_server_addr, run_tunnel_server_with_connector, + websocket_serde_transport_from_socket, DynTunnelStream, RelayEndpoint, TunnelClient, + TunnelError, TunnelMode, TunnelTarget, WebSocketUdpSocket, +}; +use secrecy::ExposeSecret; +use tarpc::{ + context, + server::{BaseChannel, Channel}, +}; +use uuid::Uuid; + +use crate::{ + devbox_tunnels::split_axum_websocket, state::CoreState, websocket_socket::AxumBinarySocket, +}; + +struct CliDirectConfigProvider { + state: Arc, +} + +impl CliDirectConfigProvider { + fn new(state: Arc) -> Self { + Self { state } + } +} + +impl DirectConfigProvider for CliDirectConfigProvider { + fn request_direct_config( + &self, + user_id: Uuid, + stun_observed_addr: Option, + ) -> BoxFuture<'static, Result, String>> { + let state = self.state.clone(); + Box::pin(async move { + let rpc_client = { + let sessions = state.active_devbox_sessions.read().await; + sessions.get(&user_id).and_then(|entries| { + entries + .values() + .rev() + .next() + .map(|handle| handle.rpc_client.clone()) + }) + }; + + let Some(client) = rpc_client else { + return Err("No active Devbox CLI session found for user".to_string()); + }; + + client + .request_direct_config(context::current(), user_id, stun_observed_addr) + .await + .map_err(|err| { + format!( + "Failed to 
dispatch request_direct_config RPC to Devbox CLI: {}", + err + ) + })? + }) + } +} + +pub async fn kube_cluster_rpc_websocket( + websocket: WebSocketUpgrade, + headers: HeaderMap, + State(state): State>, +) -> Result { + tracing::debug!("now handle kube_cluster_websocket"); + let token = headers + .get(KUBE_CLUSTER_TOKEN_HEADER) + .ok_or(ApiError::Unauthenticated)? + .to_str()?; + let token = HashedToken::parse(token); + let token = state + .db + .get_kube_token(token.expose_secret()) + .await? + .ok_or(ApiError::Unauthenticated)?; + + state.db.update_kube_token_last_used(token.id).await?; + let cluster = state + .db + .get_kube_cluster(token.cluster_id) + .await? + .ok_or_else(|| { + ApiError::InternalError(format!("Cluster {} doesn't exist", token.cluster_id)) + })?; + + tracing::debug!("now handle cluster websocket"); + Ok(handle_cluster_websocket(websocket, state, cluster.id)) +} + +fn handle_cluster_websocket( + websocket: WebSocketUpgrade, + state: Arc, + cluster_id: Uuid, +) -> Response { + websocket + .on_failed_upgrade(|e| tracing::error!("websocket upgrade failed {e:?}")) + .on_upgrade(move |socket| async move { + handle_cluster_rpc(socket, state, cluster_id).await; + }) +} + +async fn handle_cluster_rpc(socket: WebSocket, state: Arc, cluster_id: Uuid) { + let socket = AxumBinarySocket::new(socket); + let transport = websocket_serde_transport_from_socket( + socket, + tarpc::tokio_serde::formats::Bincode::default(), + ); + let (server_chan, client_chan, _) = spawn_twoway(transport); + let rpc_client = + KubeManagerRpcClient::new(tarpc::client::Config::default(), client_chan).spawn(); + let direct_config_provider: Arc = + Arc::new(CliDirectConfigProvider::new(state.clone())); + let rpc_server = KubeClusterServer::new( + cluster_id, + rpc_client, + state.db.clone(), + state.kube_controller.kube_cluster_servers.clone(), + state.kube_controller.kube_cluster_server_generation.clone(), + state.kube_controller.tunnel_registry.clone(), + direct_config_provider, + 
); + + let fut = { + let rpc_server = rpc_server.clone(); + tokio::spawn(async move { + BaseChannel::with_defaults(server_chan) + .execute(rpc_server.serve()) + .for_each(|resp| async move { + tokio::spawn(resp); + }) + .await; + }) + }; + + rpc_server.register().await; + let _ = fut.await; + rpc_server.unregister().await; +} + +pub async fn kube_data_plane_websocket( + websocket: WebSocketUpgrade, + headers: HeaderMap, + State(state): State>, +) -> Result { + tracing::debug!("Handling data plane WebSocket connection"); + let raw_token = headers + .get(KUBE_CLUSTER_TOKEN_HEADER) + .ok_or(ApiError::Unauthenticated)? + .to_str()? + .to_owned(); + let token = HashedToken::parse(&raw_token); + let token = state + .db + .get_kube_token(token.expose_secret()) + .await? + .ok_or(ApiError::Unauthenticated)?; + + state.db.update_kube_token_last_used(token.id).await?; + let cluster = state + .db + .get_kube_cluster(token.cluster_id) + .await? + .ok_or_else(|| { + ApiError::InternalError(format!("Cluster {} doesn't exist", token.cluster_id)) + })?; + + tracing::debug!("Handling data plane WebSocket for cluster: {}", cluster.id); + Ok(handle_data_plane_websocket( + websocket, state, cluster.id, raw_token, + )) +} + +fn handle_data_plane_websocket( + websocket: WebSocketUpgrade, + state: Arc, + cluster_id: Uuid, + token: String, +) -> Response { + websocket + .on_failed_upgrade(|e| tracing::error!("data plane websocket upgrade failed {e:?}")) + .on_upgrade(move |socket| { + let token = token.clone(); + async move { + handle_data_plane_tunnel(socket, state, cluster_id, token).await; + } + }) +} + +async fn handle_data_plane_tunnel( + socket: WebSocket, + state: Arc, + cluster_id: Uuid, + token: String, +) { + tracing::info!("Data plane tunnel established for cluster: {}", cluster_id); + + let (sink, stream) = split_axum_websocket(socket); + let udp_socket = + WebSocketUdpSocket::from_parts(sink, stream, relay_server_addr(), relay_client_addr()); + + let connection = match 
RelayEndpoint::udp_server_connection(udp_socket).await { + Ok(connection) => connection, + Err(err) => { + tracing::warn!( + cluster_id = %cluster_id, + error = %err, + "Failed to negotiate QUIC transport for data plane tunnel" + ); + return; + } + }; + + let tunnel_client = Arc::new(TunnelClient::new_relay(connection)); + + let generation = state + .kube_controller + .tunnel_registry + .register_client(cluster_id, Arc::clone(&tunnel_client)) + .await; + + tracing::info!( + "Tunnel client registered for cluster {}; waiting for disconnect", + cluster_id + ); + + tunnel_client.closed().await; + + tracing::info!("Data plane tunnel closed for cluster: {}", cluster_id); + state + .kube_controller + .tunnel_registry + .remove_client(cluster_id, generation) + .await; +} + +pub async fn sidecar_tunnel_websocket( + Path((environment_id, workload_id)): Path<(Uuid, Uuid)>, + headers: HeaderMap, + websocket: WebSocketUpgrade, + State(state): State>, +) -> Result { + tracing::debug!( + "Handling sidecar tunnel WebSocket for environment {} workload {}", + environment_id, + workload_id + ); + + // Get the environment auth token from headers + let auth_token = extract_environment_token(&headers)?; + + // Get the environment and validate auth token + let environment = state + .db + .get_kube_environment(environment_id) + .await? 
+ .ok_or(ApiError::Unauthenticated)?; + + // Validate the auth token matches + if auth_token != environment.auth_token { + return Err(ApiError::Unauthenticated); + } + + tracing::debug!( + "Sidecar authenticated for environment {} workload {} using auth token", + environment.id, + workload_id + ); + + let user_id = environment.user_id; + let registry = state.devbox_tunnels.clone(); + + Ok(websocket.on_upgrade(move |socket| { + let registry = registry.clone(); + async move { + if let Err(err) = registry.attach_sidecar(user_id, socket).await { + tracing::warn!( + user_id = %user_id, + environment_id = %environment_id, + workload_id = %workload_id, + error = %err, + "Sidecar tunnel terminated with error" + ); + } + } + })) +} + +fn extract_environment_token(headers: &HeaderMap) -> Result<&str, ApiError> { + headers + .get(KUBE_ENVIRONMENT_TOKEN_HEADER) + .ok_or(ApiError::Unauthenticated)? + .to_str() + .map_err(|_| ApiError::Unauthenticated) +} + +pub async fn devbox_proxy_tunnel_websocket( + Path(environment_id): Path, + headers: HeaderMap, + websocket: WebSocketUpgrade, + State(state): State>, +) -> Result { + tracing::debug!( + "Handling devbox proxy tunnel WebSocket for environment {}", + environment_id + ); + + // Get the environment auth token from headers + let auth_token = headers + .get(KUBE_ENVIRONMENT_TOKEN_HEADER) + .ok_or(ApiError::Unauthenticated)? + .to_str() + .map_err(|_| ApiError::Unauthenticated)?; + + // Get the environment and validate auth token + let environment = state + .db + .get_kube_environment(environment_id) + .await? 
+ .ok_or(ApiError::Unauthenticated)?; + + // Validate the auth token matches + if auth_token != environment.auth_token { + return Err(ApiError::Unauthenticated); + } + + tracing::info!( + "Devbox proxy authenticated for environment {} using auth token", + environment.id + ); + + let cluster_id = environment.cluster_id; + let auth_token = environment.auth_token.clone(); + let tunnel_registry = state.kube_controller.tunnel_registry.clone(); + + Ok(websocket.on_upgrade(move |socket| { + let registry = tunnel_registry.clone(); + let token = auth_token.clone(); + async move { + match registry.get_client(cluster_id).await { + Some(client) if !client.is_closed() => { + serve_devbox_proxy_tunnel(socket, client, token).await; + } + _ => { + tracing::warn!( + "No active cluster tunnel available for devbox proxy environment {} (cluster {})", + environment_id, + cluster_id + ); + } + } + } + })) +} + +async fn serve_devbox_proxy_tunnel( + socket: WebSocket, + cluster_client: Arc, + token: String, +) { + let (sink, stream) = split_axum_websocket(socket); + let udp_socket = + WebSocketUdpSocket::from_parts(sink, stream, relay_server_addr(), relay_client_addr()); + + let connection = match RelayEndpoint::udp_server_connection(udp_socket).await { + Ok(connection) => connection, + Err(err) => { + tracing::warn!("Failed to negotiate QUIC relay for devbox proxy: {}", err); + return; + } + }; + + let connector_client = cluster_client.clone(); + + let connector = move |target: TunnelTarget| { + let client = connector_client.clone(); + async move { + let TunnelTarget { host, port } = target; + let stream = client.connect_tcp(host, port).await?; + Ok::(Box::new(stream) as DynTunnelStream) + } + }; + + if let Err(err) = run_tunnel_server_with_connector(connection, connector).await { + tracing::warn!("Devbox proxy tunnel terminated with error: {}", err); + } +} diff --git a/crates/api/src/kube_controller/app_catalog.rs b/crates/api/src/kube_controller/app_catalog.rs new file mode 100644 
index 0000000..81052ed --- /dev/null +++ b/crates/api/src/kube_controller/app_catalog.rs @@ -0,0 +1,1020 @@ +use k8s_openapi::api::core::v1::{ConfigMap, Secret, Service}; +use k8s_openapi::apimachinery::pkg::util::intstr::IntOrString; +use lapdev_common::kube::{ + KubeAppCatalog, KubeAppCatalogWorkload, KubeAppCatalogWorkloadCreate, KubeServiceDetails, + KubeServiceWithYaml, KubeWorkloadDetails, PagePaginationParams, PaginatedInfo, PaginatedResult, + ProxyPortRoute, DEFAULT_SIDECAR_PROXY_METRICS_PORT, DEFAULT_SIDECAR_PROXY_PORT, + SIDECAR_PROXY_DYNAMIC_PORT_START, +}; +use lapdev_db::api::{CachedClusterService, NewEnvironmentWorkload}; +use lapdev_db_entities::kube_app_catalog_workload_dependency; +use lapdev_kube_rpc::{ + KubeWorkloadYamlOnly, KubeWorkloadsWithResources, NamespacedResourceRequest, + NamespacedResourceResponse, +}; +use lapdev_rpc::error::ApiError; +use sea_orm::{ColumnTrait, EntityTrait, QueryFilter, TransactionTrait}; +use std::collections::{HashMap, HashSet}; +use std::convert::TryFrom; +use uuid::Uuid; + +use crate::kube_controller::resources::clean_service; + +use super::{ + resources::{clean_configmap, clean_secret, rebuild_workload_yaml, SidecarInjectionOptions}, + yaml_parser::build_workload_details_from_yaml, + KubeController, +}; +use chrono::Utc; + +pub struct SidecarInjectionContext<'a> { + pub environment_id: Uuid, + pub namespace: &'a str, + pub auth_token: &'a str, + pub manager_namespace: Option<&'a str>, +} + +struct PreparedWorkloads { + workload_yamls: Vec, + proxy_routes: HashMap>, + workloads_for_details: Vec, + env_workload_ids: HashMap, +} + +impl KubeController { + pub(super) async fn enrich_workloads_with_details( + &self, + cluster_id: Uuid, + workloads: Vec, + ) -> Result, ApiError> { + // Get cluster connection + let cluster_server = self + .get_random_kube_cluster_server(cluster_id) + .await + .ok_or_else(|| { + ApiError::InvalidRequest("No connected KubeManager for this cluster".to_string()) + })?; + + // Convert to 
WorkloadIdentifier for RPC call + let workload_identifiers: Vec = workloads + .iter() + .map(|w| lapdev_kube_rpc::WorkloadIdentifier { + name: w.name.clone(), + namespace: w.namespace.clone(), + kind: w.kind.clone(), + }) + .collect(); + + let raw_workloads = cluster_server + .rpc_client + .get_workloads_raw_yaml(Self::kube_manager_rpc_context(), workload_identifiers) + .await + .map_err(|e| ApiError::InvalidRequest(format!("Failed to get workload YAML: {}", e)))? + .map_err(|e| ApiError::InvalidRequest(format!("KubeManager error: {}", e)))?; + + let mut namespaces: HashSet = + raw_workloads.iter().map(|w| w.namespace.clone()).collect(); + namespaces.extend(workloads.iter().map(|w| w.namespace.clone())); + + let mut services_cache: HashMap> = HashMap::new(); + for namespace in namespaces { + let services = self + .db + .get_active_cluster_services(cluster_id, &namespace) + .await + .map_err(ApiError::from)?; + services_cache.insert(namespace, services); + } + + let mut results = Vec::with_capacity(raw_workloads.len()); + for raw in raw_workloads { + let services = services_cache + .get(&raw.namespace) + .map(|s| s.as_slice()) + .unwrap_or(&[]); + match build_workload_details_from_yaml(raw, services) { + Ok(details) => results.push(details), + Err(err) => { + return Err(ApiError::InvalidRequest(format!( + "Failed to process workload YAML: {}", + err + ))) + } + } + } + + Ok(results) + } + + /// Build KubeWorkloadsWithResources from database-cached catalog data. + /// This retrieves workload YAML and services from the database cache instead of querying Kubernetes. + /// Much faster and doesn't require connectivity to the source cluster. + /// Uses workload label and service selector tables to find only services that select the workloads' pods. 
+ pub(super) async fn get_catalog_workloads_with_yaml_from_db( + &self, + cluster_id: Uuid, + workloads: Vec, + injection_ctx: &SidecarInjectionContext<'_>, + ) -> Result<(KubeWorkloadsWithResources, Vec), ApiError> { + let workload_ids: Vec = workloads.iter().map(|w| w.id).collect(); + let services_by_workload = self + .load_services_for_catalog_workloads(cluster_id, &workload_ids) + .await?; + + let PreparedWorkloads { + workload_yamls, + proxy_routes, + workloads_for_details, + env_workload_ids, + } = self.prepare_workloads_from_cache(&workloads, &services_by_workload, injection_ctx)?; + + let services_map = + self.prepare_services_from_cache(&services_by_workload, &proxy_routes)?; + + let (configmaps_map, secrets_map) = self + .fetch_dependency_resources(cluster_id, &workload_ids) + .await?; + + tracing::info!( + "Built catalog workload resources from DB: {} workloads, {} services, {} configmaps, {} secrets (cluster: {})", + workload_yamls.len(), + services_map.len(), + configmaps_map.len(), + secrets_map.len(), + cluster_id, + ); + + let workload_details = Self::prepare_workload_details_from_catalog( + workloads_for_details, + injection_ctx.namespace, + &env_workload_ids, + )?; + + Ok(( + KubeWorkloadsWithResources { + workloads: workload_yamls, + services: services_map, + configmaps: configmaps_map, + secrets: secrets_map, + }, + workload_details, + )) + } + + async fn load_services_for_catalog_workloads( + &self, + cluster_id: Uuid, + workload_ids: &[Uuid], + ) -> Result>, ApiError> { + self.db + .get_services_for_catalog_workloads(cluster_id, workload_ids) + .await + .map_err(ApiError::from) + } + + fn prepare_workloads_from_cache( + &self, + workloads: &[KubeAppCatalogWorkload], + services_by_workload: &HashMap>, + injection_ctx: &SidecarInjectionContext<'_>, + ) -> Result { + let mut workload_yamls = Vec::with_capacity(workloads.len()); + let mut proxy_routes: HashMap> = HashMap::new(); + let mut workloads_for_details = 
Vec::with_capacity(workloads.len()); + let mut env_workload_ids: HashMap = HashMap::with_capacity(workloads.len()); + + for workload in workloads { + if workload.workload_yaml.trim().is_empty() { + return Err(ApiError::InvalidRequest(format!( + "Workload '{}' has no cached YAML in database", + workload.name + ))); + } + + let routes = Self::build_proxy_routes( + services_by_workload + .get(&workload.id) + .map(|services| services.as_slice()), + ); + + let env_workload_id = *env_workload_ids + .entry(workload.id) + .or_insert_with(Uuid::new_v4); + + let raw_yaml = workload.workload_yaml.clone(); + let sidecar_options = SidecarInjectionOptions { + environment_id: injection_ctx.environment_id, + workload_id: env_workload_id, + namespace: injection_ctx.namespace, + auth_token: injection_ctx.auth_token, + manager_namespace: injection_ctx.manager_namespace, + proxy_routes: routes.as_slice(), + }; + + let rebuilt_yaml = rebuild_workload_yaml( + &workload.kind, + &raw_yaml, + &workload.containers, + Some(&sidecar_options), + ) + .map_err(|err| { + ApiError::InvalidRequest(format!( + "Failed to reconstruct workload YAML for '{}': {}", + workload.name, err + )) + })?; + + let mut workload_for_details = workload.clone(); + workload_for_details.workload_yaml = rebuilt_yaml.clone(); + workloads_for_details.push(workload_for_details); + + let workload_yaml_only = Self::to_workload_yaml_only(&workload.kind, rebuilt_yaml); + workload_yamls.push(workload_yaml_only); + + if !routes.is_empty() { + proxy_routes.insert(workload.id, routes); + } + } + + Ok(PreparedWorkloads { + workload_yamls, + proxy_routes, + workloads_for_details, + env_workload_ids, + }) + } + + fn prepare_services_from_cache( + &self, + services_by_workload: &HashMap>, + proxy_routes: &HashMap>, + ) -> Result, ApiError> { + let mut services_map = HashMap::new(); + + for services in services_by_workload.values() { + for service in services { + if services_map.contains_key(&service.name) { + continue; + } + + let 
ports = service.ports.clone(); + let selector = service.selector.clone(); + let parsed: Service = + serde_yaml::from_str(&service.service_yaml).map_err(|err| { + ApiError::InvalidRequest(format!( + "Failed to parse cached Service '{}' YAML: {}", + service.name, err + )) + })?; + let cleaned = clean_service(parsed); + let cleaned_yaml = serde_yaml::to_string(&cleaned).map_err(|err| { + ApiError::InvalidRequest(format!( + "Failed to serialize cleaned Service '{}' YAML: {}", + service.name, err + )) + })?; + + services_map.insert( + service.name.clone(), + KubeServiceWithYaml { + yaml: cleaned_yaml, + details: KubeServiceDetails { + name: service.name.clone(), + ports, + selector, + }, + }, + ); + } + } + + for (workload_id, routes) in proxy_routes { + if let Some(services) = services_by_workload.get(workload_id) { + for service in services { + if let Some(entry) = services_map.get_mut(&service.name) { + rewrite_service_for_sidecar(entry, routes)?; + } + } + } + } + + Ok(services_map) + } + + async fn fetch_dependency_resources( + &self, + cluster_id: Uuid, + workload_ids: &[Uuid], + ) -> Result<(HashMap, HashMap), ApiError> { + if workload_ids.is_empty() { + return Ok((HashMap::new(), HashMap::new())); + } + + let dependency_rows = kube_app_catalog_workload_dependency::Entity::find() + .filter( + kube_app_catalog_workload_dependency::Column::WorkloadId + .is_in(workload_ids.to_vec()), + ) + .filter(kube_app_catalog_workload_dependency::Column::DeletedAt.is_null()) + .all(&self.db.conn) + .await + .map_err(ApiError::from)?; + + let mut dependency_requests: HashMap, HashSet)> = + HashMap::new(); + + for row in dependency_rows { + let entry = dependency_requests + .entry(row.namespace.clone()) + .or_default(); + match row.resource_type.as_str() { + "configmap" => { + entry.0.insert(row.resource_name.clone()); + } + "secret" => { + entry.1.insert(row.resource_name.clone()); + } + _ => {} + } + } + + if dependency_requests.is_empty() { + return Ok((HashMap::new(), 
HashMap::new())); + } + + let cluster_server = self + .get_random_kube_cluster_server(cluster_id) + .await + .ok_or_else(|| { + ApiError::InvalidRequest( + "No connected KubeManager for the app catalog's source cluster".to_string(), + ) + })?; + + let requests: Vec = dependency_requests + .into_iter() + .map( + |(namespace, (configmaps, secrets))| NamespacedResourceRequest { + namespace, + configmaps: configmaps.into_iter().collect(), + secrets: secrets.into_iter().collect(), + }, + ) + .collect(); + + if requests.is_empty() { + return Ok((HashMap::new(), HashMap::new())); + } + + let responses = match cluster_server + .rpc_client + .get_namespaced_resources(Self::kube_manager_rpc_context(), requests) + .await + { + Ok(Ok(res)) => res, + Ok(Err(e)) => { + return Err(ApiError::InvalidRequest(format!( + "Failed to fetch ConfigMaps/Secrets: {e}" + ))) + } + Err(e) => { + return Err(ApiError::InvalidRequest(format!( + "Connection error to source cluster: {e}" + ))) + } + }; + + Self::extract_dependency_resources(responses) + } + + fn build_proxy_routes(services: Option<&[CachedClusterService]>) -> Vec { + let mut routes = Vec::new(); + + if let Some(services) = services { + let mut assigned_ports: HashMap<(u16, u16), u16> = HashMap::new(); + let mut used_proxy_ports: HashSet = HashSet::new(); + used_proxy_ports.insert(DEFAULT_SIDECAR_PROXY_PORT); + used_proxy_ports.insert(DEFAULT_SIDECAR_PROXY_METRICS_PORT); + let mut next_proxy_port = u32::from(SIDECAR_PROXY_DYNAMIC_PORT_START); + + for service in services { + for port in &service.ports { + let service_port = match u16::try_from(port.port) { + Ok(port) if port > 0 => port, + _ => continue, + }; + + let raw_target = port.original_target_port.or(port.target_port); + let target_port = match raw_target { + Some(value) => match u16::try_from(value) { + Ok(tp) if tp > 0 => tp, + _ => continue, + }, + None => { + // TODO: support named targetPort values when the service uses port names. 
+ continue; + } + }; + + let key = (service_port, target_port); + let proxy_port = if let Some(existing) = assigned_ports.get(&key) { + *existing + } else { + let allocated = loop { + if next_proxy_port > u32::from(u16::MAX) { + tracing::warn!( + service_port, + target_port, + "exhausted dynamic proxy port range; reusing service port for proxy" + ); + break service_port; + } + + let candidate = next_proxy_port as u16; + next_proxy_port += 1; + + if candidate == service_port || candidate == target_port { + continue; + } + + if !used_proxy_ports.insert(candidate) { + continue; + } + + break candidate; + }; + + assigned_ports.insert(key, allocated); + allocated + }; + + routes.push(ProxyPortRoute { + proxy_port, + service_port, + target_port, + }); + } + } + } + + routes + } + + fn to_workload_yaml_only( + kind: &lapdev_common::kube::KubeWorkloadKind, + yaml: String, + ) -> KubeWorkloadYamlOnly { + match kind { + lapdev_common::kube::KubeWorkloadKind::Deployment => { + KubeWorkloadYamlOnly::Deployment(yaml) + } + lapdev_common::kube::KubeWorkloadKind::StatefulSet => { + KubeWorkloadYamlOnly::StatefulSet(yaml) + } + lapdev_common::kube::KubeWorkloadKind::DaemonSet => { + KubeWorkloadYamlOnly::DaemonSet(yaml) + } + lapdev_common::kube::KubeWorkloadKind::ReplicaSet => { + KubeWorkloadYamlOnly::ReplicaSet(yaml) + } + lapdev_common::kube::KubeWorkloadKind::Pod => KubeWorkloadYamlOnly::Pod(yaml), + lapdev_common::kube::KubeWorkloadKind::Job => KubeWorkloadYamlOnly::Job(yaml), + lapdev_common::kube::KubeWorkloadKind::CronJob => KubeWorkloadYamlOnly::CronJob(yaml), + } + } + + fn extract_dependency_resources( + responses: Vec, + ) -> Result<(HashMap, HashMap), ApiError> { + let mut configmaps_map: HashMap = HashMap::new(); + let mut secrets_map: HashMap = HashMap::new(); + + for response in responses { + for (name, yaml) in response.configmaps { + let parsed: ConfigMap = serde_yaml::from_str(&yaml).map_err(|err| { + ApiError::InvalidRequest(format!( + "Failed to parse 
ConfigMap '{name}' from namespace {}: {err}", + response.namespace + )) + })?; + let cleaned = clean_configmap(parsed); + let cleaned_yaml = serde_yaml::to_string(&cleaned).map_err(|err| { + ApiError::InvalidRequest(format!( + "Failed to serialize ConfigMap '{name}' from namespace {}: {err}", + response.namespace + )) + })?; + configmaps_map.insert(name, cleaned_yaml); + } + + for (name, yaml) in response.secrets { + let parsed: Secret = serde_yaml::from_str(&yaml).map_err(|err| { + ApiError::InvalidRequest(format!( + "Failed to parse Secret '{name}' from namespace {}: {err}", + response.namespace + )) + })?; + let cleaned = clean_secret(parsed); + let cleaned_yaml = serde_yaml::to_string(&cleaned).map_err(|err| { + ApiError::InvalidRequest(format!( + "Failed to serialize Secret '{name}' from namespace {}: {err}", + response.namespace + )) + })?; + secrets_map.insert(name, cleaned_yaml); + } + } + + Ok((configmaps_map, secrets_map)) + } + + pub async fn create_app_catalog( + &self, + org_id: Uuid, + user_id: Uuid, + cluster_id: Uuid, + name: String, + description: Option, + workloads: Vec, + ) -> Result { + // Get enriched workload details from KubeManager + let enriched_workloads = self + .enrich_workloads_with_details(cluster_id, workloads) + .await?; + + let namespaces: HashSet = enriched_workloads + .iter() + .map(|workload| workload.namespace.clone()) + .collect(); + + let catalog_id = self + .db + .create_app_catalog_with_enriched_workloads( + org_id, + user_id, + cluster_id, + name, + description, + enriched_workloads, + ) + .await + .map_err(ApiError::from)?; + + if !namespaces.is_empty() { + let cluster_servers = { + let servers = self.kube_cluster_servers.read().await; + servers + .get(&cluster_id) + .map(|entries| entries.values().cloned().collect::>()) + }; + + if let Some(cluster_servers) = cluster_servers { + for server in cluster_servers { + for namespace in &namespaces { + if let Err(err) = server.add_namespace_watch(namespace.clone()).await { + 
tracing::warn!( + cluster_id = %cluster_id, + namespace = namespace.as_str(), + error = ?err, + "Failed to add namespace watch for catalog namespace" + ); + } + } + } + } else { + tracing::debug!( + cluster_id = %cluster_id, + "No connected KubeManager instances; skipping namespace watch add" + ); + } + } + + Ok(catalog_id) + } + + pub async fn get_all_app_catalogs( + &self, + org_id: Uuid, + search: Option, + pagination: Option, + ) -> Result, ApiError> { + let pagination = pagination.unwrap_or_default(); + + let (catalogs_with_clusters, total_count) = self + .db + .get_all_app_catalogs_paginated(org_id, search, Some(pagination.clone())) + .await + .map_err(ApiError::from)?; + + let app_catalogs = catalogs_with_clusters + .into_iter() + .filter_map(|(catalog, cluster)| { + let cluster = cluster?; + Some(KubeAppCatalog { + id: catalog.id, + name: catalog.name, + description: catalog.description, + created_at: catalog.created_at, + created_by: catalog.created_by, + cluster_id: catalog.cluster_id, + cluster_name: cluster.name, + last_sync_actor_id: catalog.last_sync_actor_id, + }) + }) + .collect(); + + let total_pages = (total_count + pagination.page_size - 1) / pagination.page_size; + + Ok(PaginatedResult { + data: app_catalogs, + pagination_info: PaginatedInfo { + total_count, + page: pagination.page, + page_size: pagination.page_size, + total_pages, + }, + }) + } + + pub async fn get_app_catalog( + &self, + org_id: Uuid, + catalog_id: Uuid, + ) -> Result { + // Get catalog with cluster info + let (catalog, cluster) = self + .db + .get_all_app_catalogs_paginated(org_id, None, None) + .await + .map_err(ApiError::from)? 
+ .0 + .into_iter() + .find(|(cat, _)| cat.id == catalog_id) + .ok_or_else(|| ApiError::InvalidRequest("App catalog not found".to_string()))?; + + let cluster = + cluster.ok_or_else(|| ApiError::InvalidRequest("Cluster not found".to_string()))?; + + Ok(KubeAppCatalog { + id: catalog.id, + name: catalog.name, + description: catalog.description, + created_at: catalog.created_at, + created_by: catalog.created_by, + cluster_id: catalog.cluster_id, + cluster_name: cluster.name, + last_sync_actor_id: catalog.last_sync_actor_id, + }) + } + + pub async fn get_app_catalog_workloads( + &self, + org_id: Uuid, + catalog_id: Uuid, + ) -> Result, ApiError> { + // Verify catalog belongs to the organization + let catalog = self + .db + .get_app_catalog(catalog_id) + .await + .map_err(ApiError::from)? + .ok_or_else(|| ApiError::InvalidRequest("App catalog not found".to_string()))?; + + if catalog.organization_id != org_id { + return Err(ApiError::Unauthorized); + } + + self.db + .get_app_catalog_workloads(catalog_id) + .await + .map_err(ApiError::from) + } + + pub async fn delete_app_catalog(&self, org_id: Uuid, catalog_id: Uuid) -> Result<(), ApiError> { + // Verify catalog belongs to the organization + let catalog = self + .db + .get_app_catalog(catalog_id) + .await + .map_err(ApiError::from)? + .ok_or_else(|| ApiError::InvalidRequest("App catalog not found".to_string()))?; + + if catalog.organization_id != org_id { + return Err(ApiError::Unauthorized); + } + + // Check for dependencies - kube_environment (any user's environments using this catalog) + let has_environments = self + .db + .check_app_catalog_has_environments(catalog_id) + .await + .map_err(ApiError::from)?; + + if has_environments { + return Err(ApiError::InvalidRequest( + "Cannot delete app catalog: it has active environments. Please delete them first." 
+ .to_string(), + )); + } + + // Soft delete the app catalog + self.db + .delete_app_catalog(catalog_id) + .await + .map_err(ApiError::from)?; + + self.refresh_cluster_namespace_watches(catalog.cluster_id) + .await; + + Ok(()) + } + + pub async fn delete_app_catalog_workload( + &self, + org_id: Uuid, + workload_id: Uuid, + ) -> Result<(), ApiError> { + // First get the workload to find its catalog + let workload = self + .db + .get_app_catalog_workload(workload_id) + .await + .map_err(ApiError::from)? + .ok_or_else(|| ApiError::InvalidRequest("Workload not found".to_string()))?; + + // Verify the catalog belongs to the organization + let catalog = self + .db + .get_app_catalog(workload.app_catalog_id) + .await + .map_err(ApiError::from)? + .ok_or_else(|| ApiError::InvalidRequest("App catalog not found".to_string()))?; + + if catalog.organization_id != org_id { + return Err(ApiError::Unauthorized); + } + + // Delete the workload + self.db + .delete_app_catalog_workload(workload_id) + .await + .map_err(ApiError::from)?; + + let _ = self + .db + .bump_app_catalog_sync_version(workload.app_catalog_id, Utc::now().into()) + .await + .map_err(ApiError::from)?; + + self.refresh_cluster_namespace_watches(workload.cluster_id) + .await; + + Ok(()) + } + + pub async fn update_app_catalog_workload( + &self, + org_id: Uuid, + workload_id: Uuid, + containers: Vec, + ) -> Result<(), ApiError> { + // Validate containers + super::validation::validate_containers(&containers)?; + + // First get the workload to find its catalog + let workload = self + .db + .get_app_catalog_workload(workload_id) + .await + .map_err(ApiError::from)? + .ok_or_else(|| ApiError::InvalidRequest("Workload not found".to_string()))?; + + // Verify the catalog belongs to the organization + let catalog = self + .db + .get_app_catalog(workload.app_catalog_id) + .await + .map_err(ApiError::from)? 
+ .ok_or_else(|| ApiError::InvalidRequest("App catalog not found".to_string()))?; + + if catalog.organization_id != org_id { + return Err(ApiError::Unauthorized); + } + + // Update the workload containers + self.db + .update_app_catalog_workload(workload_id, containers) + .await + .map_err(ApiError::from)?; + + let new_version = self + .db + .bump_app_catalog_sync_version(catalog.id, Utc::now().into()) + .await + .map_err(ApiError::from)?; + + self.db + .update_catalog_workload_versions(&[workload.id], new_version) + .await + .map_err(ApiError::from) + } + + pub async fn add_workloads_to_app_catalog( + &self, + org_id: Uuid, + _user_id: Uuid, + catalog_id: Uuid, + workloads: Vec, + ) -> Result<(), ApiError> { + // Verify the catalog belongs to the organization + let catalog = self + .db + .get_app_catalog(catalog_id) + .await + .map_err(ApiError::from)? + .ok_or_else(|| ApiError::InvalidRequest("App catalog not found".to_string()))?; + + if catalog.organization_id != org_id { + return Err(ApiError::Unauthorized); + } + + // Enrich workloads with details from KubeManager + let enriched_workloads = self + .enrich_workloads_with_details(catalog.cluster_id, workloads) + .await?; + + // Add enriched workloads to the catalog + let txn = self.db.conn.begin().await.map_err(ApiError::from)?; + let now = chrono::Utc::now().into(); + + let inserted_ids = match self + .db + .insert_enriched_workloads_to_catalog( + &txn, + catalog_id, + catalog.cluster_id, + enriched_workloads, + now, + ) + .await + { + Ok(ids) => ids, + Err(db_err) => { + txn.rollback().await.map_err(ApiError::from)?; + if matches!( + db_err.sql_err(), + Some(sea_orm::SqlErr::UniqueConstraintViolation(_)) + ) { + return Err(ApiError::InvalidRequest( + "One or more selected workloads already exist in this catalog".to_string(), + )); + } else { + return Err(ApiError::from(anyhow::Error::from(db_err))); + } + } + }; + + txn.commit().await.map_err(ApiError::from)?; + + let new_version = self + .db + 
.bump_app_catalog_sync_version(catalog.id, Utc::now().into()) + .await + .map_err(ApiError::from)?; + + self.db + .update_catalog_workload_versions(&inserted_ids, new_version) + .await + .map_err(ApiError::from)?; + + self.refresh_cluster_namespace_watches(catalog.cluster_id) + .await; + + Ok(()) + } +} + +fn rewrite_service_for_sidecar( + service_entry: &mut KubeServiceWithYaml, + routes: &[ProxyPortRoute], +) -> Result<(), ApiError> { + if routes.is_empty() { + return Ok(()); + } + + let mut service: Service = serde_yaml::from_str(&service_entry.yaml).map_err(|err| { + ApiError::InvalidRequest(format!( + "Failed to parse cached Service '{}' YAML: {}", + service_entry.details.name, err + )) + })?; + + if let Some(spec) = service.spec.as_mut() { + if let Some(ports) = spec.ports.as_mut() { + for port in ports.iter_mut() { + let service_port = port.port; + if let Some(route) = routes + .iter() + .find(|route| route.service_port as i32 == service_port) + { + port.target_port = Some(IntOrString::Int(route.proxy_port as i32)); + } + } + } + } + + service_entry.yaml = serde_yaml::to_string(&service).map_err(|err| { + ApiError::InvalidRequest(format!( + "Failed to serialize Service '{}' YAML after sidecar rewrite: {}", + service_entry.details.name, err + )) + })?; + + for port in service_entry.details.ports.iter_mut() { + if let Some(route) = routes + .iter() + .find(|route| route.service_port as i32 == port.port) + { + if port.original_target_port.is_none() { + port.original_target_port = Some(route.target_port as i32); + } + port.target_port = Some(route.proxy_port as i32); + } + } + + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + use k8s_openapi::api::core::v1::Service; + use k8s_openapi::apimachinery::pkg::util::intstr::IntOrString; + use lapdev_common::kube::KubeServicePort; + use std::collections::BTreeMap; + + #[test] + fn rewrite_service_preserves_original_target_port() { + let initial_yaml = r#" +apiVersion: v1 +kind: Service +metadata: + name: test 
+spec: + ports: + - port: 80 + targetPort: 8080 + protocol: TCP + selector: + app: demo +"# + .to_string(); + + let mut service_entry = KubeServiceWithYaml { + yaml: initial_yaml, + details: KubeServiceDetails { + name: "test".to_string(), + ports: vec![KubeServicePort { + name: None, + port: 80, + target_port: Some(8080), + protocol: Some("TCP".to_string()), + node_port: None, + original_target_port: None, + }], + selector: BTreeMap::new(), + }, + }; + + let routes = vec![ProxyPortRoute { + proxy_port: DEFAULT_SIDECAR_PROXY_PORT, + service_port: 80, + target_port: 8080, + }]; + + rewrite_service_for_sidecar(&mut service_entry, &routes).unwrap(); + + let updated: Service = serde_yaml::from_str(&service_entry.yaml).unwrap(); + let rewritten_target = updated + .spec + .as_ref() + .and_then(|spec| spec.ports.as_ref()) + .and_then(|ports| ports.first()) + .and_then(|port| port.target_port.clone()) + .expect("target port should be set"); + + let target_value = match rewritten_target { + IntOrString::Int(value) => value, + IntOrString::String(_) => panic!("expected numeric target port"), + }; + + assert_eq!(target_value, DEFAULT_SIDECAR_PROXY_PORT as i32); + + let details_port = &service_entry.details.ports[0]; + assert_eq!( + details_port.target_port, + Some(DEFAULT_SIDECAR_PROXY_PORT as i32) + ); + assert_eq!(details_port.original_target_port, Some(8080)); + } +} diff --git a/crates/api/src/kube_controller/cluster.rs b/crates/api/src/kube_controller/cluster.rs new file mode 100644 index 0000000..5c65b77 --- /dev/null +++ b/crates/api/src/kube_controller/cluster.rs @@ -0,0 +1,398 @@ +use std::str::FromStr; +use uuid::Uuid; + +use lapdev_common::{ + kube::{ + CreateKubeClusterResponse, KubeCluster, KubeClusterInfo, KubeClusterStatus, + KubeNamespaceInfo, KubeWorkload, KubeWorkloadKind, KubeWorkloadList, PaginationParams, + }, + token::PlainToken, +}; +use lapdev_rpc::error::ApiError; +use secrecy::ExposeSecret; + +use super::KubeController; + +impl KubeController { + pub 
async fn get_all_kube_clusters(&self, org_id: Uuid) -> Result<Vec<KubeCluster>, ApiError> {
+ .ok_or_else(|| ApiError::InvalidRequest("Cluster not found".to_string()))?; + + if cluster.organization_id != org_id { + return Err(ApiError::Unauthorized); + } + + // Check for dependencies - kube_app_catalog + let has_app_catalogs = self + .db + .check_kube_cluster_has_app_catalogs(cluster_id) + .await + .map_err(ApiError::from)?; + + if has_app_catalogs { + return Err(ApiError::InvalidRequest( + "Cannot delete cluster: it has active app catalogs. Please delete them first." + .to_string(), + )); + } + + // Check for dependencies - kube_environment + let has_environments = self + .db + .check_kube_cluster_has_environments(cluster_id) + .await + .map_err(ApiError::from)?; + + if has_environments { + return Err(ApiError::InvalidRequest( + "Cannot delete cluster: it has active environments. Please delete them first." + .to_string(), + )); + } + + // Soft delete the cluster + self.db + .delete_kube_cluster(cluster_id) + .await + .map_err(ApiError::from) + } + + pub async fn set_cluster_deployable( + &self, + org_id: Uuid, + cluster_id: Uuid, + can_deploy_personal: bool, + can_deploy_shared: bool, + ) -> Result<(), ApiError> { + // Verify cluster belongs to the organization + let cluster = self + .db + .get_kube_cluster(cluster_id) + .await + .map_err(ApiError::from)? + .ok_or_else(|| ApiError::InvalidRequest("Cluster not found".to_string()))?; + + if cluster.organization_id != org_id { + return Err(ApiError::Unauthorized); + } + + // Update the deployment capability fields + self.db + .set_cluster_deployable(cluster_id, can_deploy_personal, can_deploy_shared) + .await + .map_err(ApiError::from)?; + + Ok(()) + } + + pub async fn set_cluster_personal_deployable( + &self, + org_id: Uuid, + cluster_id: Uuid, + can_deploy: bool, + ) -> Result<(), ApiError> { + // Verify cluster belongs to the organization + let cluster = self + .db + .get_kube_cluster(cluster_id) + .await + .map_err(ApiError::from)? 
+ .ok_or_else(|| ApiError::InvalidRequest("Cluster not found".to_string()))?; + + if cluster.organization_id != org_id { + return Err(ApiError::Unauthorized); + } + + // Update only the personal deployment capability + self.db + .set_cluster_deployable(cluster_id, can_deploy, cluster.can_deploy_shared) + .await + .map_err(ApiError::from)?; + + Ok(()) + } + + pub async fn set_cluster_shared_deployable( + &self, + org_id: Uuid, + cluster_id: Uuid, + can_deploy: bool, + ) -> Result<(), ApiError> { + // Verify cluster belongs to the organization + let cluster = self + .db + .get_kube_cluster(cluster_id) + .await + .map_err(ApiError::from)? + .ok_or_else(|| ApiError::InvalidRequest("Cluster not found".to_string()))?; + + if cluster.organization_id != org_id { + return Err(ApiError::Unauthorized); + } + + // Update only the shared deployment capability + self.db + .set_cluster_deployable(cluster_id, cluster.can_deploy_personal, can_deploy) + .await + .map_err(ApiError::from)?; + + Ok(()) + } + + pub async fn get_workloads( + &self, + org_id: Uuid, + cluster_id: Uuid, + namespace: Option, + workload_kind_filter: Option, + include_system_workloads: bool, + pagination: Option, + ) -> Result { + // Verify cluster belongs to the organization + let cluster = self + .db + .get_kube_cluster(cluster_id) + .await + .map_err(ApiError::from)? 
+ .ok_or_else(|| ApiError::InvalidRequest("Cluster not found".to_string()))?; + + if cluster.organization_id != org_id { + return Err(ApiError::Unauthorized); + } + + // Get a connected KubeClusterServer for this cluster + let server = self + .get_random_kube_cluster_server(cluster_id) + .await + .ok_or_else(|| { + ApiError::InvalidRequest("No connected KubeManager for this cluster".to_string()) + })?; + + let pagination = pagination.unwrap_or(PaginationParams { + cursor: None, + limit: 20, + }); + + // Call KubeManager to get workloads + match server + .rpc_client + .get_workloads( + Self::kube_manager_rpc_context(), + namespace, + workload_kind_filter, + include_system_workloads, + Some(pagination), + ) + .await + { + Ok(Ok(workload_list)) => Ok(workload_list), + Ok(Err(e)) => Err(ApiError::InvalidRequest(format!( + "KubeManager error: {}", + e + ))), + Err(e) => Err(ApiError::InvalidRequest(format!("Connection error: {}", e))), + } + } + + pub async fn get_workload_details( + &self, + org_id: Uuid, + cluster_id: Uuid, + name: String, + namespace: String, + ) -> Result, ApiError> { + // Verify cluster belongs to the organization + let cluster = self + .db + .get_kube_cluster(cluster_id) + .await + .map_err(ApiError::from)? 
+ .ok_or_else(|| ApiError::InvalidRequest("Cluster not found".to_string()))?; + + if cluster.organization_id != org_id { + return Err(ApiError::Unauthorized); + } + + // Get a connected KubeClusterServer for this cluster + let server = self + .get_random_kube_cluster_server(cluster_id) + .await + .ok_or_else(|| { + ApiError::InvalidRequest("No connected KubeManager for this cluster".to_string()) + })?; + + // Call KubeManager to get workload details + match server + .rpc_client + .get_workload_details(Self::kube_manager_rpc_context(), name, namespace) + .await + { + Ok(Ok(workload_details)) => Ok(workload_details), + Ok(Err(e)) => Err(ApiError::InvalidRequest(format!( + "KubeManager error: {}", + e + ))), + Err(e) => Err(ApiError::InvalidRequest(format!("Connection error: {}", e))), + } + } + + pub async fn get_cluster_namespaces( + &self, + org_id: Uuid, + cluster_id: Uuid, + ) -> Result, ApiError> { + // Verify cluster belongs to the organization + let cluster = self + .db + .get_kube_cluster(cluster_id) + .await + .map_err(ApiError::from)? 
+ .ok_or_else(|| ApiError::InvalidRequest("Cluster not found".to_string()))?; + + if cluster.organization_id != org_id { + return Err(ApiError::Unauthorized); + } + + // Get a connected KubeClusterServer for this cluster + let server = self + .get_random_kube_cluster_server(cluster_id) + .await + .ok_or_else(|| { + ApiError::InvalidRequest("No connected KubeManager for this cluster".to_string()) + })?; + + // Call KubeManager to get namespaces + match server + .rpc_client + .get_namespaces(Self::kube_manager_rpc_context()) + .await + { + Ok(Ok(namespaces)) => Ok(namespaces), + Ok(Err(e)) => Err(ApiError::InvalidRequest(format!( + "KubeManager error: {}", + e + ))), + Err(e) => Err(ApiError::InvalidRequest(format!("Connection error: {}", e))), + } + } + + pub async fn get_cluster_info( + &self, + org_id: Uuid, + cluster_id: Uuid, + ) -> Result { + // Get cluster from database + let cluster = self + .db + .get_kube_cluster(cluster_id) + .await + .map_err(ApiError::from)? + .ok_or_else(|| ApiError::InvalidRequest("Cluster not found".to_string()))?; + + if cluster.organization_id != org_id { + return Err(ApiError::Unauthorized); + } + + // Convert database cluster to KubeClusterInfo + let cluster_info = KubeClusterInfo { + cluster_version: cluster + .cluster_version + .clone() + .unwrap_or_else(|| "Unknown".to_string()), + node_count: 0, // TODO: Get actual node count from kube-manager + available_cpu: "N/A".to_string(), // TODO: Get actual CPU from kube-manager + available_memory: "N/A".to_string(), // TODO: Get actual memory from kube-manager + provider: cluster.provider.clone(), + region: cluster.region.clone(), + status: KubeClusterStatus::from_str(&cluster.status) + .unwrap_or(KubeClusterStatus::NotReady), + manager_namespace: cluster.manager_namespace.clone(), + }; + + Ok(KubeCluster { + id: cluster.id, + name: cluster.name, + can_deploy_personal: cluster.can_deploy_personal, + can_deploy_shared: cluster.can_deploy_shared, + info: cluster_info, + }) + } +} diff 
--git a/crates/api/src/kube_controller/container_images.rs b/crates/api/src/kube_controller/container_images.rs new file mode 100644 index 0000000..139076f --- /dev/null +++ b/crates/api/src/kube_controller/container_images.rs @@ -0,0 +1,26 @@ +/// Shared image tag for Lapdev Kubernetes components. +pub const CONTAINER_IMAGE_TAG: &str = "0.2.0"; + +/// Registry/repository for the Lapdev API image. +pub(crate) const API_IMAGE_REPO: &str = "ghcr.io/lapce/lapdev-api"; +/// Registry/repository for the kube-manager controller image. +pub(crate) const KUBE_MANAGER_IMAGE_REPO: &str = "ghcr.io/lapce/lapdev-kube-manager"; +/// Registry/repository for the sidecar proxy image. +pub(crate) const SIDECAR_PROXY_IMAGE_REPO: &str = "ghcr.io/lapce/lapdev-kube-sidecar-proxy"; + +/// Helper returning the Lapdev API image reference with the shared tag. +#[allow(dead_code)] +pub(crate) fn api_image_reference() -> String { + format!("{}:{}", API_IMAGE_REPO, CONTAINER_IMAGE_TAG) +} + +/// Helper returning the kube-manager image reference with the shared tag. +#[allow(dead_code)] +pub(crate) fn kube_manager_image_reference() -> String { + format!("{}:{}", KUBE_MANAGER_IMAGE_REPO, CONTAINER_IMAGE_TAG) +} + +/// Helper returning the sidecar proxy image reference with the shared tag. 
+pub(crate) fn sidecar_proxy_image_reference() -> String { + format!("{}:{}", SIDECAR_PROXY_IMAGE_REPO, CONTAINER_IMAGE_TAG) +} diff --git a/crates/api/src/kube_controller/deployment.rs b/crates/api/src/kube_controller/deployment.rs new file mode 100644 index 0000000..f9359cf --- /dev/null +++ b/crates/api/src/kube_controller/deployment.rs @@ -0,0 +1,208 @@ +use std::collections::HashMap; + +use super::KubeController; +use lapdev_kube::server::KubeClusterServer; +use lapdev_rpc::error::ApiError; + +impl KubeController { + pub(super) async fn deploy_environment_resources( + &self, + target_server: &KubeClusterServer, + environment: &lapdev_db_entities::kube_environment::Model, + workloads_with_resources: lapdev_kube_rpc::KubeWorkloadsWithResources, + extra_labels: Option>, + ) -> Result<(), ApiError> { + let namespace = &environment.namespace; + let environment_name = &environment.name; + + tracing::info!( + "Deploying environment resources for '{}' in namespace '{}'", + environment_name, + namespace + ); + + if workloads_with_resources.workloads.is_empty() { + tracing::warn!("No workloads found for environment '{}'", environment_name); + return Ok(()); + } + + tracing::info!( + "Found {} workloads to deploy for environment '{}'", + workloads_with_resources.workloads.len(), + environment_name + ); + + // Prepare environment-specific labels + let mut environment_labels = std::collections::HashMap::new(); + environment_labels.insert("lapdev.managed-by".to_string(), "lapdev".to_string()); + if environment.base_environment_id.is_some() { + environment_labels.insert( + "lapdev.io/branch-environment-id".to_string(), + environment.id.to_string(), + ); + } + if let Some(extra) = extra_labels { + for (key, value) in extra { + environment_labels.insert(key, value); + } + } + + // Deploy all workloads and resources in a single call + if environment.auth_token.is_empty() { + return Err(ApiError::InvalidRequest( + "Environment auth token is required".to_string(), + )); + } + let 
mut ctx = Self::kube_manager_rpc_context(); + + match target_server + .rpc_client + .deploy_workload_yaml( + ctx, + namespace.to_string(), + workloads_with_resources, + environment_labels, + ) + .await + { + Ok(Ok(())) => { + tracing::info!("Successfully deployed all workloads to target cluster"); + Ok(()) + } + Ok(Err(e)) => { + tracing::error!("Failed to deploy workloads to target cluster: {}", e); + Err(ApiError::InvalidRequest(format!( + "Failed to deploy workloads to target cluster: {e}" + ))) + } + Err(e) => Err(ApiError::InvalidRequest(format!( + "Connection error to target cluster: {e}" + ))), + } + } +} + +#[cfg(test)] +mod tests { + use crate::kube_controller::resources; + + use super::super::container_images; + use super::*; + use k8s_openapi::api::apps::v1::{Deployment, DeploymentSpec}; + use k8s_openapi::api::core::v1::{Container, PodSpec, PodTemplateSpec}; + use k8s_openapi::apimachinery::pkg::apis::meta::v1::LabelSelector; + use lapdev_common::kube::{ + DEFAULT_SIDECAR_PROXY_BIND_ADDR, DEFAULT_SIDECAR_PROXY_PORT, + SIDECAR_PROXY_BIND_ADDR_ENV_VAR, SIDECAR_PROXY_MANAGER_ADDR_ENV_VAR, + SIDECAR_PROXY_PORT_ENV_VAR, SIDECAR_PROXY_WORKLOAD_ENV_VAR, + }; + use uuid::Uuid; + + #[test] + fn inject_sidecar_proxy_appends_once() { + let mut deployment = Deployment { + metadata: Default::default(), + spec: Some(DeploymentSpec { + replicas: None, + selector: LabelSelector::default(), + template: PodTemplateSpec { + metadata: Default::default(), + spec: Some(PodSpec { + containers: vec![Container { + name: "primary".to_string(), + ..Default::default() + }], + ..Default::default() + }), + }, + ..Default::default() + }), + status: None, + }; + + let environment_id = Uuid::new_v4(); + let workload_id = Uuid::new_v4(); + let options = resources::SidecarInjectionOptions { + environment_id, + workload_id, + namespace: "lapdev-namespace", + auth_token: "auth-token", + manager_namespace: Some("lapdev"), + proxy_routes: &[], + }; + + 
resources::inject_sidecar_proxy_into_deployment(&mut deployment, &options).unwrap(); + resources::inject_sidecar_proxy_into_deployment(&mut deployment, &options).unwrap(); + + let pod_spec = deployment + .spec + .as_ref() + .and_then(|spec| spec.template.spec.as_ref()) + .expect("deployment pod spec should exist"); + + let sidecars: Vec<&Container> = pod_spec + .containers + .iter() + .filter(|container| container.name == "lapdev-sidecar-proxy") + .collect(); + assert_eq!(sidecars.len(), 1, "sidecar should be appended exactly once"); + + let sidecar = sidecars[0]; + let expected_image = container_images::sidecar_proxy_image_reference(); + assert_eq!( + sidecar.image.as_deref(), + Some(expected_image.as_str()), + "sidecar should use the shared repo:tag reference" + ); + assert!( + sidecar.args.is_none(), + "sidecar should not rely on CLI arguments once injected" + ); + + let env_vars = sidecar + .env + .as_ref() + .expect("sidecar should include required environment variables"); + let bind_addr_var = env_vars + .iter() + .find(|var| var.name == SIDECAR_PROXY_BIND_ADDR_ENV_VAR) + .expect("bind address environment variable should be set"); + assert_eq!( + bind_addr_var.value.as_deref(), + Some(DEFAULT_SIDECAR_PROXY_BIND_ADDR), + "bind address env var should carry the default proxy address" + ); + + let port_var = env_vars + .iter() + .find(|var| var.name == SIDECAR_PROXY_PORT_ENV_VAR) + .expect("port environment variable should be set"); + let expected_port = DEFAULT_SIDECAR_PROXY_PORT.to_string(); + assert_eq!( + port_var.value.as_deref(), + Some(expected_port.as_str()), + "port env var should reflect the default proxy port" + ); + + let manager_addr_var = env_vars + .iter() + .find(|var| var.name == SIDECAR_PROXY_MANAGER_ADDR_ENV_VAR) + .expect("manager addr environment variable should be set"); + assert_eq!( + manager_addr_var.value.as_deref(), + Some("lapdev-kube-manager.lapdev.svc:5001"), + "manager addr env var should default to lapdev namespace" + ); + + let 
expected_workload_id = workload_id.to_string(); + let workload_var = env_vars + .iter() + .find(|var| var.name == SIDECAR_PROXY_WORKLOAD_ENV_VAR) + .expect("workload environment variable should be set"); + assert_eq!( + workload_var.value.as_deref(), + Some(expected_workload_id.as_str()), + "workload env var should carry the provided workload id" + ); + } +} diff --git a/crates/api/src/kube_controller/environment.rs b/crates/api/src/kube_controller/environment.rs new file mode 100644 index 0000000..8c616ce --- /dev/null +++ b/crates/api/src/kube_controller/environment.rs @@ -0,0 +1,2840 @@ +use chrono::Utc; +use k8s_openapi::api::{ + apps::v1::{DaemonSet, Deployment, ReplicaSet, StatefulSet}, + batch::v1::{CronJob, Job}, + core::v1::{Pod, PodSpec, Service}, +}; +use k8s_openapi::apimachinery::pkg::util::intstr::IntOrString; +use lapdev_common::{ + kube::{ + KubeAppCatalogWorkload, KubeContainerImage, KubeEnvironment, + KubeEnvironmentDashboardSummary, KubeEnvironmentService, KubeEnvironmentStatus, + KubeEnvironmentSyncStatus, KubeServiceDetails, KubeServicePort, KubeServiceWithYaml, + KubeWorkloadDetails, KubeWorkloadKind, PagePaginationParams, PaginatedInfo, + PaginatedResult, + }, + utils::rand_string, +}; +use lapdev_db::api::NewEnvironmentWorkload; +use lapdev_kube::server::KubeClusterServer; +use lapdev_kube_rpc::{KubeWorkloadYamlOnly, KubeWorkloadsWithResources, NamespacedResourceKind}; +use lapdev_rpc::error::ApiError; +use sea_orm::{ + prelude::{DateTimeWithTimeZone, Json}, + sea_query::Expr, + ActiveModelTrait, ActiveValue, ColumnTrait, EntityTrait, QueryFilter, TransactionTrait, +}; +use serde_yaml; +use std::{ + collections::{HashMap, HashSet}, + str::FromStr, + time::{Duration, Instant}, +}; +use tracing::{error, warn}; +use uuid::Uuid; + +use crate::environment_events::EnvironmentLifecycleEvent; + +use super::{ + app_catalog::SidecarInjectionContext, + resources::{set_cronjob_suspend, set_daemonset_paused, set_workload_replicas}, + 
workload::{build_branch_service_selector, rename_service_yaml, rename_workload_yaml}, + EnvironmentNamespaceKind, KubeController, +}; + +impl KubeController { + pub(super) async fn generate_unique_namespace( + &self, + _cluster_id: Uuid, + kind: EnvironmentNamespaceKind, + ) -> Result { + let prefix = match kind { + EnvironmentNamespaceKind::Personal => "lapdev-personal", + EnvironmentNamespaceKind::Shared => "lapdev-shared", + EnvironmentNamespaceKind::Branch => "lapdev-branch", + }; + + Ok(format!("{prefix}-{}", rand_string(12))) + } + + pub async fn get_all_kube_environments( + &self, + org_id: Uuid, + user_id: Uuid, + search: Option, + is_shared: bool, + is_branch: bool, + pagination: Option, + ) -> Result, ApiError> { + let pagination = pagination.unwrap_or_default(); + + let (environments_with_catalogs_and_clusters, total_count) = self + .db + .get_all_kube_environments_paginated( + org_id, + user_id, + search, + is_shared, + is_branch, + Some(pagination.clone()), + ) + .await + .map_err(ApiError::from)?; + + let kube_environments = environments_with_catalogs_and_clusters + .into_iter() + .filter_map( + |( + env, + catalog, + cluster, + base_environment_name, + base_environment_catalog_sync_version, + base_environment_last_synced_at, + )| { + let catalog = catalog?; + let cluster = cluster?; + let catalog_sync_version = env.catalog_sync_version; + let status = KubeEnvironmentStatus::from_str(&env.status) + .unwrap_or(KubeEnvironmentStatus::Creating); + let last_catalog_synced_at = + env.last_catalog_synced_at.map(|dt| dt.to_string()); + let paused_at = env.paused_at.map(|dt| dt.to_string()); + let resumed_at = env.resumed_at.map(|dt| dt.to_string()); + let catalog_update_available = match env.base_environment_id { + Some(_) => base_environment_catalog_sync_version + .map(|version| version > catalog_sync_version) + .unwrap_or(false), + None => catalog.sync_version > catalog_sync_version, + }; + + Some(KubeEnvironment { + id: env.id, + name: env.name, + 
namespace: env.namespace, + app_catalog_id: env.app_catalog_id, + app_catalog_name: catalog.name, + cluster_id: env.cluster_id, + cluster_name: cluster.name, + status, + created_at: env.created_at.to_string(), + user_id: env.user_id, + is_shared: env.is_shared, + base_environment_id: env.base_environment_id, + base_environment_name, + base_environment_catalog_sync_version, + base_environment_last_catalog_synced_at: base_environment_last_synced_at + .map(|dt| dt.to_string()), + catalog_sync_version, + last_catalog_synced_at, + paused_at, + resumed_at, + catalog_update_available, + catalog_last_sync_actor_id: catalog.last_sync_actor_id, + sync_status: KubeEnvironmentSyncStatus::from_str(&env.sync_status) + .unwrap_or(KubeEnvironmentSyncStatus::Idle), + }) + }, + ) + .collect(); + + let total_pages = total_count.div_ceil(pagination.page_size); + + Ok(PaginatedResult { + data: kube_environments, + pagination_info: PaginatedInfo { + total_count, + page: pagination.page, + page_size: pagination.page_size, + total_pages, + }, + }) + } + + pub async fn get_environment_dashboard_summary( + &self, + org_id: Uuid, + user_id: Uuid, + recent_limit: Option, + ) -> Result { + let limit = recent_limit.unwrap_or(5); + self.db + .get_environment_dashboard_summary(org_id, user_id, limit) + .await + .map_err(ApiError::from) + } + + pub async fn get_kube_environment( + &self, + org_id: Uuid, + user_id: Uuid, + environment_id: Uuid, + ) -> Result { + // Get the environment from database + let environment = self + .db + .get_kube_environment(environment_id) + .await + .map_err(ApiError::from)? 
+ .ok_or_else(|| ApiError::InvalidRequest("Environment not found".to_string()))?; + + // Check authorization + if environment.organization_id != org_id { + return Err(ApiError::Unauthorized); + } + + // If it's a personal environment, check ownership + if !environment.is_shared && environment.user_id != user_id { + return Err(ApiError::Unauthorized); + } + + // Get related catalog and cluster info + let catalog = self + .db + .get_app_catalog(environment.app_catalog_id) + .await + .map_err(ApiError::from)? + .ok_or_else(|| ApiError::InvalidRequest("App catalog not found".to_string()))?; + + let cluster = self + .db + .get_kube_cluster(environment.cluster_id) + .await + .map_err(ApiError::from)? + .ok_or_else(|| ApiError::InvalidRequest("Cluster not found".to_string()))?; + + // Get base environment name if this is a branch environment + let base_environment = if let Some(base_env_id) = environment.base_environment_id { + self.db + .get_kube_environment(base_env_id) + .await + .map_err(ApiError::from)? 
+ } else { + None + }; + + let base_environment_name = base_environment.as_ref().map(|env| env.name.clone()); + let base_environment_catalog_sync_version = base_environment + .as_ref() + .map(|env| env.catalog_sync_version); + let base_environment_last_synced_at = + base_environment.and_then(|env| env.last_catalog_synced_at); + + let catalog_update_available = if base_environment_catalog_sync_version.is_some() { + base_environment_catalog_sync_version + .map(|version| version > environment.catalog_sync_version) + .unwrap_or(false) + } else { + catalog.sync_version > environment.catalog_sync_version + }; + + let status = KubeEnvironmentStatus::from_str(&environment.status) + .unwrap_or(KubeEnvironmentStatus::Creating); + + Ok(KubeEnvironment { + id: environment.id, + user_id: environment.user_id, + name: environment.name, + namespace: environment.namespace, + app_catalog_id: environment.app_catalog_id, + app_catalog_name: catalog.name, + cluster_id: environment.cluster_id, + cluster_name: cluster.name, + status, + created_at: environment + .created_at + .format("%Y-%m-%d %H:%M:%S%.f %z") + .to_string(), + is_shared: environment.is_shared, + base_environment_id: environment.base_environment_id, + base_environment_name, + base_environment_catalog_sync_version, + base_environment_last_catalog_synced_at: base_environment_last_synced_at + .map(|dt| dt.to_string()), + catalog_sync_version: environment.catalog_sync_version, + last_catalog_synced_at: environment.last_catalog_synced_at.map(|dt| dt.to_string()), + paused_at: environment.paused_at.map(|dt| dt.to_string()), + resumed_at: environment.resumed_at.map(|dt| dt.to_string()), + catalog_last_sync_actor_id: catalog.last_sync_actor_id, + catalog_update_available, + sync_status: KubeEnvironmentSyncStatus::from_str(&environment.sync_status) + .unwrap_or(KubeEnvironmentSyncStatus::Idle), + }) + } + + /// Prepare workload details for environment creation by copying from catalog workloads + pub(super) fn 
prepare_workload_details_from_catalog( + workloads: Vec<KubeAppCatalogWorkload>, + namespace: &str, + env_workload_ids: &HashMap<Uuid, Uuid>, + ) -> Result<Vec<NewEnvironmentWorkload>, ApiError> {
+ /// + /// No catalog fallback: avoiding divergence and ensuring pause/resume replay the original manifest. + async fn assemble_environment_workloads( + &self, + environment: &lapdev_db_entities::kube_environment::Model, + ) -> Result { + let stored_workloads = self + .db + .get_environment_workloads(environment.id) + .await + .map_err(ApiError::from)?; + + if stored_workloads.is_empty() + || stored_workloads + .iter() + .any(|w| w.workload_yaml.trim().is_empty()) + { + return Err(ApiError::InvalidRequest( + "Environment workloads are missing stored manifests".to_string(), + )); + } + + let mut workloads = Vec::new(); + for workload in stored_workloads { + let kind = KubeWorkloadKind::from_str(&workload.kind).map_err(|_| { + ApiError::InvalidRequest(format!( + "Invalid workload kind {} for environment {}", + workload.kind, environment.id + )) + })?; + workloads.push(Self::wrap_workload_yaml(kind, workload.workload_yaml)); + } + + Ok(KubeWorkloadsWithResources { + workloads, + services: HashMap::new(), + configmaps: HashMap::new(), + secrets: HashMap::new(), + }) + } + + /// Build KubeEnvironment response from database model + fn build_environment_response( + created_env: lapdev_db_entities::kube_environment::Model, + app_catalog_name: String, + cluster_name: String, + base_environment_name: Option, + base_environment_catalog_sync_version: Option, + base_environment_last_synced_at: Option, + ) -> lapdev_common::kube::KubeEnvironment { + let status = KubeEnvironmentStatus::from_str(&created_env.status) + .unwrap_or(KubeEnvironmentStatus::Creating); + let base_version = base_environment_catalog_sync_version; + let catalog_update_available = match (created_env.base_environment_id, base_version) { + (Some(_), Some(base)) => base > created_env.catalog_sync_version, + _ => false, + }; + lapdev_common::kube::KubeEnvironment { + id: created_env.id, + user_id: created_env.user_id, + name: created_env.name, + namespace: created_env.namespace, + status, + is_shared: 
created_env.is_shared, + app_catalog_id: created_env.app_catalog_id, + app_catalog_name, + cluster_id: created_env.cluster_id, + cluster_name, + created_at: created_env.created_at.to_string(), + base_environment_id: created_env.base_environment_id, + base_environment_name, + base_environment_catalog_sync_version: base_version, + base_environment_last_catalog_synced_at: base_environment_last_synced_at + .map(|dt| dt.to_string()), + catalog_sync_version: created_env.catalog_sync_version, + last_catalog_synced_at: created_env.last_catalog_synced_at.map(|dt| dt.to_string()), + paused_at: created_env.paused_at.map(|dt| dt.to_string()), + resumed_at: created_env.resumed_at.map(|dt| dt.to_string()), + catalog_update_available, + catalog_last_sync_actor_id: None, + sync_status: KubeEnvironmentSyncStatus::from_str(&created_env.sync_status) + .unwrap_or(KubeEnvironmentSyncStatus::Idle), + } + } + + fn wrap_workload_yaml(kind: KubeWorkloadKind, yaml: String) -> KubeWorkloadYamlOnly { + match kind { + KubeWorkloadKind::Deployment => KubeWorkloadYamlOnly::Deployment(yaml), + KubeWorkloadKind::StatefulSet => KubeWorkloadYamlOnly::StatefulSet(yaml), + KubeWorkloadKind::DaemonSet => KubeWorkloadYamlOnly::DaemonSet(yaml), + KubeWorkloadKind::ReplicaSet => KubeWorkloadYamlOnly::ReplicaSet(yaml), + KubeWorkloadKind::Pod => KubeWorkloadYamlOnly::Pod(yaml), + KubeWorkloadKind::Job => KubeWorkloadYamlOnly::Job(yaml), + KubeWorkloadKind::CronJob => KubeWorkloadYamlOnly::CronJob(yaml), + } + } + + /// Notify devbox-proxy about branch environment deletion + async fn notify_branch_environment_deletion( + &self, + rpc_client: &lapdev_kube_rpc::KubeManagerRpcClient, + base_env_id: Uuid, + environment_id: Uuid, + ) -> Result<(), ApiError> { + match rpc_client + .remove_branch_environment( + Self::kube_manager_rpc_context(), + base_env_id, + environment_id, + ) + .await + { + Ok(Ok(())) => { + tracing::info!( + "Successfully notified devbox-proxy about branch environment {} deletion", + 
environment_id + ); + Ok(()) + } + Ok(Err(e)) => { + if e.contains("No devbox-proxy registered for base environment") { + tracing::warn!( + base_environment_id = %base_env_id, + branch_environment_id = %environment_id, + error = %e, + "No devbox-proxy registered for base environment; skipping branch deletion notification" + ); + return Ok(()); + } + tracing::error!( + "Failed to notify devbox-proxy about branch environment {} deletion: {}", + environment_id, + e + ); + Err(ApiError::InvalidRequest(format!( + "Failed to notify devbox-proxy about branch environment deletion: {e}" + ))) + } + Err(e) => { + tracing::error!( + "RPC call failed when notifying about branch environment {} deletion: {}", + environment_id, + e + ); + Err(ApiError::InvalidRequest(format!( + "Connection error while notifying devbox-proxy: {e}" + ))) + } + } + } + + /// Destroy environment resources via RPC + async fn destroy_environment_resources( + &self, + rpc_client: &lapdev_kube_rpc::KubeManagerRpcClient, + environment: &lapdev_db_entities::kube_environment::Model, + ) -> Result<(), ApiError> { + let mut ctx = Self::kube_manager_rpc_context(); + + match rpc_client + .destroy_environment(ctx, environment.id, environment.namespace.clone()) + .await + { + Ok(Ok(())) => { + tracing::info!( + "Successfully deleted resources for environment {} in namespace {}", + environment.id, + environment.namespace + ); + Ok(()) + } + Ok(Err(e)) => { + tracing::error!( + "KubeManager error when deleting environment {}: {}", + environment.id, + e + ); + Err(ApiError::InvalidRequest(format!( + "Failed to delete environment resources: {e}" + ))) + } + Err(e) => { + tracing::error!( + "Connection error when deleting environment {}: {}", + environment.id, + e + ); + Err(ApiError::InvalidRequest(format!( + "Failed to communicate with KubeManager to delete environment: {e}" + ))) + } + } + } + + async fn destroy_branch_environment_resources( + &self, + rpc_client: &lapdev_kube_rpc::KubeManagerRpcClient, + 
environment: &lapdev_db_entities::kube_environment::Model, + ) -> Result<(), ApiError> { + let mut resources: Vec<(NamespacedResourceKind, String)> = Vec::new(); + + let branch_namespace = environment.namespace.clone(); + + let branch_workloads = self + .db + .get_environment_workloads(environment.id) + .await + .map_err(ApiError::from)?; + for workload in branch_workloads { + let kind = KubeWorkloadKind::from_str(&workload.kind).map_err(|_| { + ApiError::InvalidRequest(format!( + "Invalid workload kind {} in branch environment", + workload.kind + )) + })?; + resources.push((Self::workload_kind_to_resource_kind(&kind), workload.name)); + } + + let branch_services = self + .db + .get_environment_services(environment.id) + .await + .map_err(ApiError::from)?; + for service in branch_services { + resources.push((NamespacedResourceKind::Service, service.name)); + } + + let mut ctx = Self::kube_manager_rpc_context(); + match rpc_client + .delete_namespaced_resources(ctx, branch_namespace.clone(), resources) + .await + { + Ok(Ok(())) => {} + Ok(Err(err)) => { + tracing::warn!( + environment_id = %environment.id, + error = %err, + "KubeManager refused to delete some branch resources" + ); + } + Err(err) => { + tracing::warn!( + environment_id = %environment.id, + error = ?err, + "RPC error while deleting branch resources" + ); + } + } + + Ok(()) + } + + pub async fn delete_kube_environment( + &self, + org_id: Uuid, + _user_id: Uuid, + environment_id: Uuid, + ) -> Result<(), ApiError> { + // Authorize deletion (HRPC already enforced ownership/role, so we just ensure the environment exists and belongs to the org) + let environment = self + .db + .get_kube_environment(environment_id) + .await + .map_err(ApiError::from)? 
+ .ok_or_else(|| ApiError::InvalidRequest("Environment not found".to_string()))?; + + if environment.organization_id != org_id { + return Err(ApiError::Unauthorized); + } + + if environment.is_shared { + let has_branches = self + .db + .check_environment_has_branches(environment_id) + .await + .map_err(ApiError::from)?; + + if has_branches { + return Err(ApiError::InvalidRequest( + "Cannot delete shared environment: it has active branch environments. Please delete them first.".to_string() + )); + } + } + + // Get RPC client for cluster operations + let rpc_client = self + .get_random_kube_cluster_server(environment.cluster_id) + .await + .ok_or_else(|| { + ApiError::InvalidRequest( + "No connected KubeManager for this cluster; cannot delete environment" + .to_string(), + ) + })? + .rpc_client + .clone(); + + // If this is a branch environment, notify the base environment's devbox-proxy + if let Some(base_env_id) = environment.base_environment_id { + self.notify_branch_environment_deletion(&rpc_client, base_env_id, environment_id) + .await?; + } + + // Mark as deleting immediately + self.update_environment_status(environment_id, KubeEnvironmentStatus::Deleting, None, None) + .await?; + + // Destroy environment resources in Kubernetes asynchronously + let controller = self.clone(); + let environment_clone = environment.clone(); + let rpc_client_clone = rpc_client.clone(); + + tokio::spawn(async move { + let env_id = environment_clone.id; + let destroy_result = if environment_clone.base_environment_id.is_some() { + controller + .destroy_branch_environment_resources(&rpc_client_clone, &environment_clone) + .await + } else { + controller + .destroy_environment_resources(&rpc_client_clone, &environment_clone) + .await + }; + + match destroy_result { + Ok(_) => { + match controller + .db + .delete_kube_environment(env_id) + .await + .map_err(ApiError::from) + { + Ok(_) => { + if environment_clone.base_environment_id.is_none() { + match rpc_client_clone + 
.remove_namespace_watch( + KubeController::kube_manager_rpc_context(), + environment_clone.namespace.clone(), + ) + .await + { + Ok(Ok(())) => {} + Ok(Err(err)) => warn!( + cluster_id = %environment_clone.cluster_id, + namespace = %environment_clone.namespace, + error = %err, + "Failed to remove namespace watch after environment deletion" + ), + Err(err) => warn!( + cluster_id = %environment_clone.cluster_id, + namespace = %environment_clone.namespace, + error = ?err, + "RPC error while removing namespace watch after environment deletion" + ), + } + } + + let event = EnvironmentLifecycleEvent { + organization_id: environment_clone.organization_id, + environment_id: env_id, + status: KubeEnvironmentStatus::Deleted, + paused_at: environment_clone + .paused_at + .as_ref() + .map(|dt| dt.to_string()), + resumed_at: environment_clone + .resumed_at + .as_ref() + .map(|dt| dt.to_string()), + updated_at: Utc::now(), + }; + controller.publish_environment_event(event).await; + } + Err(err) => { + error!( + environment_id = %env_id, + error = ?err, + "failed to delete environment record after resource cleanup" + ); + let _ = controller + .update_environment_status( + env_id, + KubeEnvironmentStatus::Error, + None, + None, + ) + .await; + } + } + } + Err(err) => { + error!( + environment_id = %env_id, + error = ?err, + "failed to cleanup environment resources" + ); + let _ = controller + .update_environment_status(env_id, KubeEnvironmentStatus::Error, None, None) + .await; + } + } + }); + + Ok(()) + } + + pub async fn pause_kube_environment( + &self, + _org_id: Uuid, + _user_id: Uuid, + environment_id: Uuid, + ) -> Result<(), ApiError> { + let environment = self + .db + .get_kube_environment(environment_id) + .await + .map_err(ApiError::from)? 
+ .ok_or_else(|| ApiError::InvalidRequest("Environment not found".to_string()))?; + + let status = KubeEnvironmentStatus::from_str(&environment.status) + .unwrap_or(KubeEnvironmentStatus::Creating); + + match status { + KubeEnvironmentStatus::Running | KubeEnvironmentStatus::PauseFailed => {} // allowed transitions + KubeEnvironmentStatus::Paused => { + return Err(ApiError::InvalidRequest( + "Environment is already paused".to_string(), + )) + } + _ => { + return Err(ApiError::InvalidRequest(format!( + "Environment cannot be paused while it is in status {}", + status + ))) + } + } + + self.update_environment_status(environment.id, KubeEnvironmentStatus::Pausing, None, None) + .await?; + + let controller = self.clone(); + tokio::spawn(async move { + if let Err(err) = controller.run_pause_environment(environment.id).await { + error!( + environment_id = %environment.id, + error = ?err, + "Pause environment orchestration failed" + ); + let _ = controller + .update_environment_status( + environment.id, + KubeEnvironmentStatus::PauseFailed, + None, + None, + ) + .await; + } + }); + + Ok(()) + } + + pub async fn resume_kube_environment( + &self, + org_id: Uuid, + user_id: Uuid, + environment_id: Uuid, + ) -> Result<(), ApiError> { + let environment = self + .db + .get_kube_environment(environment_id) + .await + .map_err(ApiError::from)? 
+ .ok_or_else(|| ApiError::InvalidRequest("Environment not found".to_string()))?; + + let status = KubeEnvironmentStatus::from_str(&environment.status) + .unwrap_or(KubeEnvironmentStatus::Creating); + + match status { + KubeEnvironmentStatus::Paused + | KubeEnvironmentStatus::PauseFailed + | KubeEnvironmentStatus::ResumeFailed => {} + KubeEnvironmentStatus::Running => { + return Err(ApiError::InvalidRequest( + "Environment is already running".to_string(), + )) + } + _ => { + return Err(ApiError::InvalidRequest(format!( + "Environment cannot be resumed while it is in status {}", + status + ))) + } + } + + self.update_environment_status(environment.id, KubeEnvironmentStatus::Resuming, None, None) + .await?; + + let controller = self.clone(); + tokio::spawn(async move { + if let Err(err) = controller.run_resume_environment(environment.id).await { + error!( + environment_id = %environment.id, + error = ?err, + "Resume environment orchestration failed" + ); + let _ = controller + .update_environment_status( + environment.id, + KubeEnvironmentStatus::ResumeFailed, + None, + None, + ) + .await; + } + }); + + Ok(()) + } + + /// Validate and get app catalog and cluster for environment creation + async fn validate_environment_creation( + &self, + org_id: Uuid, + app_catalog_id: Uuid, + cluster_id: Uuid, + is_shared: bool, + ) -> Result< + ( + lapdev_db_entities::kube_app_catalog::Model, + lapdev_db_entities::kube_cluster::Model, + ), + ApiError, + > { + // Verify app catalog belongs to the organization + let app_catalog = self + .db + .get_app_catalog(app_catalog_id) + .await + .map_err(ApiError::from)? + .ok_or_else(|| ApiError::InvalidRequest("App catalog not found".to_string()))?; + + if app_catalog.organization_id != org_id { + return Err(ApiError::Unauthorized); + } + + // Verify cluster belongs to the organization + let cluster = self + .db + .get_kube_cluster(cluster_id) + .await + .map_err(ApiError::from)? 
+ .ok_or_else(|| ApiError::InvalidRequest("Cluster not found".to_string()))?; + + if cluster.organization_id != org_id { + return Err(ApiError::Unauthorized); + } + + // Check if the cluster allows deployments for the requested environment type + if is_shared && !cluster.can_deploy_shared { + return Err(ApiError::InvalidRequest( + "Shared deployments are not allowed on this cluster".to_string(), + )); + } + + if !is_shared && !cluster.can_deploy_personal { + return Err(ApiError::InvalidRequest( + "Personal deployments are not allowed on this cluster".to_string(), + )); + } + + Ok((app_catalog, cluster)) + } + + pub async fn create_kube_environment( + &self, + org_id: Uuid, + user_id: Uuid, + app_catalog_id: Uuid, + cluster_id: Uuid, + name: String, + is_shared: bool, + ) -> Result { + let name = Self::normalize_environment_name(name)?; + + // Validate catalog and cluster + let (app_catalog, cluster) = self + .validate_environment_creation(org_id, app_catalog_id, cluster_id, is_shared) + .await?; + + let server = self.require_cluster_server(cluster_id).await?; + let workloads = self.fetch_catalog_workloads(&app_catalog).await?; + let namespace = self + .generate_unique_namespace(cluster_id, Self::namespace_kind(is_shared)) + .await?; + let environment_id = Uuid::new_v4(); + let auth_token = rand_string(32); + let environment_namespace = namespace.clone(); + let (workloads_with_resources, workload_details) = self + .prepare_catalog_workloads( + &app_catalog, + &cluster, + workloads, + environment_id, + &namespace, + &auth_token, + ) + .await?; + let services_map = workloads_with_resources.services.clone(); + + let created_env = self + .persist_environment_creation( + environment_id, + org_id, + user_id, + app_catalog_id, + cluster_id, + name, + namespace, + is_shared, + app_catalog.sync_version, + workload_details, + services_map, + auth_token, + ) + .await?; + + // Ensure kube-manager begins watching the new namespace immediately. 
+ if let Err(err) = server + .add_namespace_watch(environment_namespace.clone()) + .await + { + warn!( + cluster_id = %cluster_id, + namespace = environment_namespace, + error = ?err, + "Failed to add namespace watch for new environment" + ); + } + + self.spawn_environment_deployment(server, created_env.clone(), workloads_with_resources); + + Ok(Self::build_environment_response( + created_env, + app_catalog.name, + cluster.name, + None, + None, + None, + )) + } + + fn normalize_environment_name(name: String) -> Result { + let trimmed = name.trim(); + if trimmed.is_empty() { + return Err(ApiError::InvalidRequest( + "Environment name cannot be empty".to_string(), + )); + } + Ok(trimmed.to_string()) + } + + fn namespace_kind(is_shared: bool) -> EnvironmentNamespaceKind { + if is_shared { + EnvironmentNamespaceKind::Shared + } else { + EnvironmentNamespaceKind::Personal + } + } + + async fn require_cluster_server( + &self, + cluster_id: Uuid, + ) -> Result { + self.get_random_kube_cluster_server(cluster_id) + .await + .ok_or_else(|| { + ApiError::InvalidRequest( + "No connected KubeManager for the environment target cluster".to_string(), + ) + }) + } + + async fn fetch_catalog_workloads( + &self, + app_catalog: &lapdev_db_entities::kube_app_catalog::Model, + ) -> Result, ApiError> { + let workloads = self + .db + .get_app_catalog_workloads(app_catalog.id) + .await + .map_err(ApiError::from)?; + + if workloads.is_empty() { + return Err(ApiError::InvalidRequest(format!( + "No workloads found for app catalog '{}'", + app_catalog.name + ))); + } + + Ok(workloads) + } + + async fn prepare_catalog_workloads( + &self, + app_catalog: &lapdev_db_entities::kube_app_catalog::Model, + cluster: &lapdev_db_entities::kube_cluster::Model, + workloads: Vec, + environment_id: Uuid, + namespace: &str, + auth_token: &str, + ) -> Result<(KubeWorkloadsWithResources, Vec), ApiError> { + self.get_catalog_workloads_with_yaml_from_db( + app_catalog.cluster_id, + workloads, + 
&SidecarInjectionContext { + environment_id, + namespace, + auth_token, + manager_namespace: cluster.manager_namespace.as_deref(), + }, + ) + .await + } + + async fn persist_environment_creation( + &self, + environment_id: Uuid, + org_id: Uuid, + user_id: Uuid, + app_catalog_id: Uuid, + cluster_id: Uuid, + name: String, + namespace: String, + is_shared: bool, + catalog_sync_version: i64, + workload_details: Vec, + services_map: HashMap, + auth_token: String, + ) -> Result { + match self + .db + .create_kube_environment( + environment_id, + org_id, + user_id, + app_catalog_id, + cluster_id, + name, + namespace, + KubeEnvironmentStatus::Creating.to_string(), + is_shared, + catalog_sync_version, + None, + workload_details, + services_map, + auth_token, + ) + .await + { + Ok(env) => Ok(env), + Err(db_err) => { + if let Some(sea_orm::SqlErr::UniqueConstraintViolation(constraint)) = + db_err.sql_err() + { + if constraint == "kube_environment_app_cluster_namespace_unique_idx" { + return Err(ApiError::InvalidRequest( + "This app catalog is already deployed to the specified namespace in this cluster".to_string(), + )); + } + if constraint == "kube_environment_cluster_namespace_unique_idx" { + return Err(ApiError::InternalError( + "Namespace allocation conflict. Please retry environment creation." 
+ .to_string(), + )); + } + } + Err(ApiError::from(anyhow::Error::from(db_err))) + } + } + } + + fn spawn_environment_deployment( + &self, + server: KubeClusterServer, + created_env: lapdev_db_entities::kube_environment::Model, + workloads_with_resources: KubeWorkloadsWithResources, + ) { + let controller = self.clone(); + let server_clone = server.clone(); + let env_for_task = created_env.clone(); + + tokio::spawn(async move { + match controller + .deploy_environment_resources( + &server_clone, + &env_for_task, + workloads_with_resources, + None, + ) + .await + { + Ok(_) => { + if let Err(err) = controller + .update_environment_status( + env_for_task.id, + KubeEnvironmentStatus::Running, + None, + None, + ) + .await + { + error!( + environment_id = %env_for_task.id, + error = ?err, + "failed to mark environment as running after creation" + ); + } + } + Err(err) => { + error!( + environment_id = %env_for_task.id, + error = ?err, + "environment creation deployment failed" + ); + if let Err(status_err) = controller + .update_environment_status( + env_for_task.id, + KubeEnvironmentStatus::Error, + None, + None, + ) + .await + { + error!( + environment_id = %env_for_task.id, + error = ?status_err, + "failed to mark environment as errored after deployment failure" + ); + } + } + } + }); + } + + /// Validate base environment for branch creation + async fn validate_base_environment( + &self, + org_id: Uuid, + base_environment_id: Uuid, + ) -> Result< + ( + lapdev_db_entities::kube_environment::Model, + lapdev_db_entities::kube_cluster::Model, + lapdev_db_entities::kube_app_catalog::Model, + ), + ApiError, + > { + // Get the base environment + let base_environment = self + .db + .get_kube_environment(base_environment_id) + .await + .map_err(ApiError::from)? 
+ .ok_or_else(|| ApiError::InvalidRequest("Base environment not found".to_string()))?; + + // Verify base environment belongs to the same organization + if base_environment.organization_id != org_id { + return Err(ApiError::Unauthorized); + } + + // Verify base environment is shared (only shared environments can be used as base) + if !base_environment.is_shared { + return Err(ApiError::InvalidRequest( + "Only shared environments can be used as base environments".to_string(), + )); + } + + // Verify base environment is not itself a branch environment + if base_environment.base_environment_id.is_some() { + return Err(ApiError::InvalidRequest( + "Cannot create a branch from another branch environment".to_string(), + )); + } + + // Get the cluster + let cluster = self + .db + .get_kube_cluster(base_environment.cluster_id) + .await + .map_err(ApiError::from)? + .ok_or_else(|| ApiError::InvalidRequest("Cluster not found".to_string()))?; + + // Get app catalog + let app_catalog = self + .db + .get_app_catalog(base_environment.app_catalog_id) + .await + .map_err(ApiError::from)? 
+ .ok_or_else(|| ApiError::InvalidRequest("App catalog not found".to_string()))?; + + Ok((base_environment, cluster, app_catalog)) + } + + fn strip_sidecar_proxy_from_yaml( + kind: &KubeWorkloadKind, + yaml: &str, + ) -> Result { + match kind { + KubeWorkloadKind::Deployment => { + let mut deployment: Deployment = serde_yaml::from_str(yaml).map_err(|err| { + ApiError::InvalidRequest(format!( + "Failed to parse deployment YAML while stripping lapdev-sidecar-proxy: {err}" + )) + })?; + if let Some(spec) = deployment.spec.as_mut() { + if let Some(pod_spec) = spec.template.spec.as_mut() { + Self::strip_sidecar_proxy_from_pod_spec(pod_spec); + } + } + serde_yaml::to_string(&deployment).map_err(|err| { + ApiError::InvalidRequest(format!( + "Failed to serialize deployment YAML after stripping lapdev-sidecar-proxy: {err}" + )) + }) + } + KubeWorkloadKind::StatefulSet => { + let mut statefulset: StatefulSet = serde_yaml::from_str(yaml).map_err(|err| { + ApiError::InvalidRequest(format!( + "Failed to parse statefulset YAML while stripping lapdev-sidecar-proxy: {err}" + )) + })?; + if let Some(spec) = statefulset.spec.as_mut() { + if let Some(pod_spec) = spec.template.spec.as_mut() { + Self::strip_sidecar_proxy_from_pod_spec(pod_spec); + } + } + serde_yaml::to_string(&statefulset).map_err(|err| { + ApiError::InvalidRequest(format!( + "Failed to serialize statefulset YAML after stripping lapdev-sidecar-proxy: {err}" + )) + }) + } + KubeWorkloadKind::DaemonSet => { + let mut daemonset: DaemonSet = serde_yaml::from_str(yaml).map_err(|err| { + ApiError::InvalidRequest(format!( + "Failed to parse daemonset YAML while stripping lapdev-sidecar-proxy: {err}" + )) + })?; + if let Some(spec) = daemonset.spec.as_mut() { + if let Some(pod_spec) = spec.template.spec.as_mut() { + Self::strip_sidecar_proxy_from_pod_spec(pod_spec); + } + } + serde_yaml::to_string(&daemonset).map_err(|err| { + ApiError::InvalidRequest(format!( + "Failed to serialize daemonset YAML after stripping 
lapdev-sidecar-proxy: {err}" + )) + }) + } + KubeWorkloadKind::ReplicaSet => { + let mut replicaset: ReplicaSet = serde_yaml::from_str(yaml).map_err(|err| { + ApiError::InvalidRequest(format!( + "Failed to parse replicaset YAML while stripping lapdev-sidecar-proxy: {err}" + )) + })?; + if let Some(spec) = replicaset.spec.as_mut() { + if let Some(template) = spec.template.as_mut() { + if let Some(pod_spec) = template.spec.as_mut() { + Self::strip_sidecar_proxy_from_pod_spec(pod_spec); + } + } + } + serde_yaml::to_string(&replicaset).map_err(|err| { + ApiError::InvalidRequest(format!( + "Failed to serialize replicaset YAML after stripping lapdev-sidecar-proxy: {err}" + )) + }) + } + KubeWorkloadKind::Pod => { + let mut pod: Pod = serde_yaml::from_str(yaml).map_err(|err| { + ApiError::InvalidRequest(format!( + "Failed to parse pod YAML while stripping lapdev-sidecar-proxy: {err}" + )) + })?; + if let Some(pod_spec) = pod.spec.as_mut() { + Self::strip_sidecar_proxy_from_pod_spec(pod_spec); + } + serde_yaml::to_string(&pod).map_err(|err| { + ApiError::InvalidRequest(format!( + "Failed to serialize pod YAML after stripping lapdev-sidecar-proxy: {err}" + )) + }) + } + KubeWorkloadKind::Job => { + let mut job: Job = serde_yaml::from_str(yaml).map_err(|err| { + ApiError::InvalidRequest(format!( + "Failed to parse job YAML while stripping lapdev-sidecar-proxy: {err}" + )) + })?; + if let Some(spec) = job.spec.as_mut() { + if let Some(pod_spec) = spec.template.spec.as_mut() { + Self::strip_sidecar_proxy_from_pod_spec(pod_spec); + } + } + serde_yaml::to_string(&job).map_err(|err| { + ApiError::InvalidRequest(format!( + "Failed to serialize job YAML after stripping lapdev-sidecar-proxy: {err}" + )) + }) + } + KubeWorkloadKind::CronJob => { + let mut cronjob: CronJob = serde_yaml::from_str(yaml).map_err(|err| { + ApiError::InvalidRequest(format!( + "Failed to parse cronjob YAML while stripping lapdev-sidecar-proxy: {err}" + )) + })?; + if let Some(spec) = cronjob.spec.as_mut() { 
+ if let Some(job_spec) = spec.job_template.spec.as_mut() { + if let Some(pod_spec) = job_spec.template.spec.as_mut() { + Self::strip_sidecar_proxy_from_pod_spec(pod_spec); + } + } + } + serde_yaml::to_string(&cronjob).map_err(|err| { + ApiError::InvalidRequest(format!( + "Failed to serialize cronjob YAML after stripping lapdev-sidecar-proxy: {err}" + )) + }) + } + } + } + + fn strip_sidecar_proxy_from_pod_spec(pod_spec: &mut PodSpec) { + const SIDECAR_NAME: &str = "lapdev-sidecar-proxy"; + pod_spec + .containers + .retain(|container| container.name != SIDECAR_NAME); + if let Some(init_containers) = pod_spec.init_containers.as_mut() { + init_containers.retain(|container| container.name != SIDECAR_NAME); + } + } + + fn restore_service_target_ports( + service_yaml: String, + ports: &mut [KubeServicePort], + ) -> Result { + let mut service: Service = serde_yaml::from_str(&service_yaml).map_err(|err| { + ApiError::InvalidRequest(format!( + "Failed to parse service YAML while restoring lapdev-sidecar proxy targets: {err}" + )) + })?; + + let mut target_map: HashMap<(Option, i32), i32> = HashMap::new(); + for port in ports.iter_mut() { + if let Some(original_target) = port.original_target_port { + port.target_port = Some(original_target); + target_map.insert((port.name.clone(), port.port), original_target); + } + } + + if let Some(spec) = service.spec.as_mut() { + if let Some(spec_ports) = spec.ports.as_mut() { + for spec_port in spec_ports.iter_mut() { + let key = (spec_port.name.clone(), spec_port.port); + if let Some(original) = target_map + .get(&key) + .or_else(|| target_map.get(&(None, spec_port.port))) + { + spec_port.target_port = Some(IntOrString::Int(*original)); + } + } + } + } + + serde_yaml::to_string(&service).map_err(|err| { + ApiError::InvalidRequest(format!( + "Failed to serialize service YAML after restoring lapdev-sidecar proxy targets: {err}" + )) + }) + } + + fn branch_resource_name(name: &str, branch_environment_id: Uuid) -> String { + let 
env_suffix = branch_environment_id.to_string(); + if name.ends_with(&env_suffix) { + name.to_string() + } else { + format!("{}-{}", name, branch_environment_id) + } + } + + fn strip_branch_suffix(name: &str, branch_environment_id: Uuid) -> String { + let suffix = format!("-{}", branch_environment_id); + if name.ends_with(&suffix) { + name[..name.len() - suffix.len()].to_string() + } else { + name.to_string() + } + } + + fn build_branch_workload_details( + workload: &lapdev_common::kube::KubeEnvironmentWorkload, + namespace: &str, + branch_environment_id: Uuid, + ) -> Result<(NewEnvironmentWorkload, String), ApiError> { + let kind: KubeWorkloadKind = workload.kind.parse().map_err(|_| { + ApiError::InvalidRequest(format!( + "Invalid workload kind {} in base environment", + workload.kind + )) + })?; + + if workload.workload_yaml.trim().is_empty() { + return Err(ApiError::InvalidRequest(format!( + "Base workload {} is missing YAML", + workload.name + ))); + } + + let branch_name = Self::branch_resource_name(&workload.name, branch_environment_id); + let sanitized_yaml = Self::strip_sidecar_proxy_from_yaml(&kind, &workload.workload_yaml)?; + let mut workload_yaml = Self::wrap_workload_yaml(kind.clone(), sanitized_yaml); + let selector = build_branch_service_selector(&branch_name); + rename_workload_yaml(&mut workload_yaml, &branch_name, &selector)?; + let workload_yaml_string = Self::workload_yaml_to_string(&workload_yaml); + + let mut containers = workload.containers.clone(); + for container in &mut containers { + container.original_env_vars = container.env_vars.clone(); + container.env_vars.clear(); + + match &container.image { + KubeContainerImage::Custom(custom_image) => { + container.original_image = custom_image.clone(); + container.image = KubeContainerImage::FollowOriginal; + } + KubeContainerImage::FollowOriginal => {} + } + } + + let env_workload_id = Uuid::new_v4(); + let details = KubeWorkloadDetails { + name: branch_name.clone(), + namespace: 
namespace.to_string(), + kind, + containers, + ports: workload.ports.clone(), + workload_yaml: workload_yaml_string, + base_workload_id: Some(workload.id), + }; + + Ok(( + NewEnvironmentWorkload { + id: env_workload_id, + details, + }, + branch_name, + )) + } + + fn service_targets_workload( + service: &lapdev_common::kube::KubeEnvironmentService, + base_workload_name: &str, + branch_workload_name: &str, + ) -> bool { + if service.name == base_workload_name || service.name == branch_workload_name { + return true; + } + service + .selector + .values() + .any(|value| value == base_workload_name || value == branch_workload_name) + } + + fn build_branch_service_entry( + service: &lapdev_common::kube::KubeEnvironmentService, + branch_environment_id: Uuid, + branch_workload_name: &str, + ) -> Result { + let branch_service_name = Self::branch_resource_name(&service.name, branch_environment_id); + let branch_selector = build_branch_service_selector(branch_workload_name); + let renamed_yaml = + rename_service_yaml(&service.yaml, &branch_service_name, &branch_selector)?; + let mut ports = service.ports.clone(); + let restored_yaml = Self::restore_service_target_ports(renamed_yaml, &mut ports)?; + Ok(lapdev_common::kube::KubeServiceWithYaml { + yaml: restored_yaml, + details: KubeServiceDetails { + name: branch_service_name, + ports, + selector: branch_selector, + }, + }) + } + + pub(super) async fn ensure_branch_workload_override( + &self, + environment: &lapdev_db_entities::kube_environment::Model, + base_workload: &lapdev_common::kube::KubeEnvironmentWorkload, + ) -> Result { + environment.base_environment_id.ok_or_else(|| { + ApiError::InvalidRequest("Branch environment missing base environment".to_string()) + })?; + + if let Some(existing) = self + .db + .get_branch_workload_override(base_workload.id, environment.id) + .await + .map_err(ApiError::from)? 
+ { + return Ok(existing); + } + + let (new_workload, branch_workload_name) = Self::build_branch_workload_details( + base_workload, + &environment.namespace, + environment.id, + )?; + + let inserted = self + .db + .insert_environment_workload( + environment.id, + new_workload, + environment.catalog_sync_version, + ) + .await + .map_err(ApiError::from)?; + + self.ensure_branch_services_for_workload( + environment, + &base_workload.name, + &branch_workload_name, + ) + .await?; + + self.db + .get_environment_workload(inserted.id) + .await + .map_err(ApiError::from)? + .ok_or_else(|| { + ApiError::InternalError( + "Failed to load newly created branch workload from database".to_string(), + ) + }) + } + + async fn reapply_branch_overrides_from_base( + &self, + branch_environment: &lapdev_db_entities::kube_environment::Model, + base_environment: &lapdev_db_entities::kube_environment::Model, + ) -> Result<(), ApiError> { + let branch_workloads = self + .db + .get_environment_workloads(branch_environment.id) + .await + .map_err(ApiError::from)?; + + if branch_workloads.is_empty() { + return Ok(()); + } + + let base_workloads = self + .db + .get_environment_workloads(base_environment.id) + .await + .map_err(ApiError::from)?; + + let mut base_workloads_by_name = HashMap::with_capacity(base_workloads.len()); + for workload in base_workloads { + base_workloads_by_name.insert(workload.name.clone(), workload); + } + + let base_services = self + .db + .get_environment_services(base_environment.id) + .await + .map_err(ApiError::from)?; + + let Some(base_environment_id) = branch_environment.base_environment_id else { + return Err(ApiError::InvalidRequest( + "Branch environment missing base environment".to_string(), + )); + }; + + let cluster_server = self + .get_random_kube_cluster_server(branch_environment.cluster_id) + .await + .ok_or_else(|| { + ApiError::InvalidRequest( + "No connected KubeManager for the environment target cluster".to_string(), + ) + })?; + + for 
branch_workload in branch_workloads { + let branch_workload_name = branch_workload.name.clone(); + let base_name = Self::strip_branch_suffix(&branch_workload_name, branch_environment.id); + + let Some(base_workload) = base_workloads_by_name.get(&base_name) else { + tracing::warn!( + environment_id = %branch_environment.id, + workload = %branch_workload_name, + "Skipping branch workload rebase because base workload no longer exists" + ); + continue; + }; + + self.refresh_branch_services_for_workload( + branch_environment, + &base_name, + &branch_workload_name, + &base_services, + ) + .await?; + + let (template_workload, _) = Self::build_branch_workload_details( + base_workload, + &branch_environment.namespace, + branch_environment.id, + )?; + + let KubeWorkloadDetails { + workload_yaml: template_yaml, + containers: template_containers, + .. + } = template_workload.details; + + let merged_containers = + Self::merge_branch_containers(&template_containers, &branch_workload.containers); + + let mut updated_workload = branch_workload.clone(); + updated_workload.workload_yaml = template_yaml; + updated_workload.base_workload_id = Some(base_workload.id); + + let kind = KubeWorkloadKind::from_str(&updated_workload.kind).map_err(|_| { + ApiError::InvalidRequest(format!( + "Invalid workload kind {} when rebasing branch workload", + updated_workload.kind + )) + })?; + + let (workload_with_resources, persisted_yaml, extra_labels) = self + .build_branch_workload_manifest( + branch_environment, + &updated_workload, + kind, + &merged_containers, + ) + .await?; + + self.deploy_environment_resources( + &cluster_server, + branch_environment, + KubeWorkloadsWithResources { + workloads: vec![workload_with_resources.workload], + services: workload_with_resources.services, + configmaps: workload_with_resources.configmaps, + secrets: workload_with_resources.secrets, + }, + Some(extra_labels), + ) + .await?; + + match cluster_server + .build_branch_service_route_config( + base_environment, 
+ base_workload.id, + branch_environment.id, + ) + .await + { + Ok(Some(route)) => { + Self::send_branch_service_route_update( + &cluster_server, + base_environment_id, + base_workload.id, + branch_environment.id, + route, + ) + .await; + } + Ok(None) => { + Self::send_branch_service_route_removal( + &cluster_server, + base_environment_id, + base_workload.id, + branch_environment.id, + ) + .await; + } + Err(err) => { + tracing::warn!( + base_environment_id = %base_environment_id, + branch_environment_id = %branch_environment.id, + workload_id = %base_workload.id, + error = ?err, + "Failed to build branch service route config during rebase; falling back to refresh" + ); + Self::refresh_branch_service_routes_with_logging( + &cluster_server, + base_environment_id, + ) + .await; + } + } + + self.db + .update_environment_workload(branch_workload.id, merged_containers, persisted_yaml) + .await + .map_err(ApiError::from)?; + + self.db + .update_branch_workload_link( + branch_workload.id, + base_workload.id, + base_environment.catalog_sync_version, + ) + .await + .map_err(ApiError::from)?; + } + + Ok(()) + } + + async fn refresh_branch_services_for_workload( + &self, + branch_environment: &lapdev_db_entities::kube_environment::Model, + base_workload_name: &str, + branch_workload_name: &str, + base_services: &[KubeEnvironmentService], + ) -> Result<(), ApiError> { + let mut desired_services = Vec::new(); + for service in base_services { + if Self::service_targets_workload(service, base_workload_name, branch_workload_name) { + desired_services.push(Self::build_branch_service_entry( + service, + branch_environment.id, + branch_workload_name, + )?); + } + } + + let desired_names: HashSet = desired_services + .iter() + .map(|svc| svc.details.name.clone()) + .collect(); + + let existing_services = self + .db + .get_environment_services(branch_environment.id) + .await + .map_err(ApiError::from)?; + + for service in existing_services { + if Self::service_targets_workload(&service, 
base_workload_name, branch_workload_name) + && !desired_names.contains(&service.name) + { + self.db + .soft_delete_environment_service(branch_environment.id, &service.name) + .await + .map_err(ApiError::from)?; + } + } + + for entry in desired_services { + self.db + .soft_delete_environment_service(branch_environment.id, &entry.details.name) + .await + .map_err(ApiError::from)?; + self.db + .insert_environment_service( + branch_environment.id, + &branch_environment.namespace, + entry, + ) + .await + .map_err(ApiError::from)?; + } + + Ok(()) + } + + fn merge_branch_containers( + template_containers: &[lapdev_common::kube::KubeContainerInfo], + branch_containers: &[lapdev_common::kube::KubeContainerInfo], + ) -> Vec { + template_containers + .iter() + .map(|template| { + let mut merged = template.clone(); + if let Some(branch) = branch_containers + .iter() + .find(|container| container.name == template.name) + { + merged.image = branch.image.clone(); + merged.cpu_request = branch.cpu_request.clone(); + merged.cpu_limit = branch.cpu_limit.clone(); + merged.memory_request = branch.memory_request.clone(); + merged.memory_limit = branch.memory_limit.clone(); + merged.env_vars = branch.env_vars.clone(); + merged.ports = branch.ports.clone(); + } + merged + }) + .collect() + } + + async fn relink_branch_overrides( + &self, + base_environment: &lapdev_db_entities::kube_environment::Model, + replacements: &[(Uuid, Uuid)], + new_catalog_sync_version: i64, + ) -> Result<(), ApiError> { + if replacements.is_empty() { + return Ok(()); + } + + for (old_base_id, new_base_id) in replacements { + let branch_workloads = self + .db + .get_workloads_by_base_workload_id(*old_base_id) + .await + .map_err(ApiError::from)?; + + for branch_workload in branch_workloads { + if branch_workload.environment_id == base_environment.id { + continue; + } + + self.db + .update_branch_workload_link( + branch_workload.id, + *new_base_id, + new_catalog_sync_version, + ) + .await + 
.map_err(ApiError::from)?; + } + } + + Ok(()) + } + + async fn ensure_branch_services_for_workload( + &self, + environment: &lapdev_db_entities::kube_environment::Model, + base_workload_name: &str, + branch_workload_name: &str, + ) -> Result<(), ApiError> { + let base_environment_id = environment.base_environment_id.ok_or_else(|| { + ApiError::InvalidRequest("Branch environment missing base environment".to_string()) + })?; + + let base_services = self + .db + .get_environment_services(base_environment_id) + .await + .map_err(ApiError::from)?; + let existing_services = self + .db + .get_environment_services(environment.id) + .await + .map_err(ApiError::from)?; + let mut existing_names: HashSet = HashSet::new(); + for svc in existing_services { + existing_names.insert(svc.name.clone()); + existing_names.insert(Self::strip_branch_suffix(&svc.name, environment.id)); + } + + for service in base_services { + if !Self::service_targets_workload(&service, base_workload_name, branch_workload_name) { + continue; + } + + let base_service_name = service.name.clone(); + let branch_service_name = + Self::branch_resource_name(&base_service_name, environment.id); + if existing_names.contains(&base_service_name) + || existing_names.contains(&branch_service_name) + { + continue; + } + + let branch_entry = + Self::build_branch_service_entry(&service, environment.id, branch_workload_name)?; + let inserted_name = branch_entry.details.name.clone(); + self.db + .insert_environment_service(environment.id, &environment.namespace, branch_entry) + .await + .map_err(ApiError::from)?; + existing_names.insert(base_service_name); + existing_names.insert(inserted_name); + } + + Ok(()) + } + + fn workload_kind_to_resource_kind(kind: &KubeWorkloadKind) -> NamespacedResourceKind { + match kind { + KubeWorkloadKind::Deployment => NamespacedResourceKind::Deployment, + KubeWorkloadKind::StatefulSet => NamespacedResourceKind::StatefulSet, + KubeWorkloadKind::DaemonSet => 
NamespacedResourceKind::DaemonSet, + KubeWorkloadKind::ReplicaSet => NamespacedResourceKind::ReplicaSet, + KubeWorkloadKind::Pod => NamespacedResourceKind::Pod, + KubeWorkloadKind::Job => NamespacedResourceKind::Job, + KubeWorkloadKind::CronJob => NamespacedResourceKind::CronJob, + } + } + + fn workload_yaml_to_string(workload: &KubeWorkloadYamlOnly) -> String { + match workload { + KubeWorkloadYamlOnly::Deployment(yaml) + | KubeWorkloadYamlOnly::StatefulSet(yaml) + | KubeWorkloadYamlOnly::DaemonSet(yaml) + | KubeWorkloadYamlOnly::ReplicaSet(yaml) + | KubeWorkloadYamlOnly::Pod(yaml) + | KubeWorkloadYamlOnly::Job(yaml) + | KubeWorkloadYamlOnly::CronJob(yaml) => yaml.clone(), + } + } + + /// Notify devbox-proxy about new branch environment + async fn notify_branch_environment_creation( + &self, + base_environment_id: Uuid, + cluster_id: Uuid, + created_env: &lapdev_db_entities::kube_environment::Model, + ) { + if let Some(server) = self.get_random_kube_cluster_server(cluster_id).await { + let branch_info = lapdev_kube_rpc::BranchEnvironmentInfo { + environment_id: created_env.id, + auth_token: created_env.auth_token.clone(), + namespace: created_env.namespace.clone(), + }; + + match server + .rpc_client + .add_branch_environment( + Self::kube_manager_rpc_context(), + base_environment_id, + branch_info, + ) + .await + { + Ok(Ok(())) => { + tracing::info!( + "Successfully notified devbox-proxy about new branch environment {}", + created_env.id + ); + } + Ok(Err(e)) => { + tracing::error!( + "Failed to notify devbox-proxy about new branch environment {}: {}", + created_env.id, + e + ); + } + Err(e) => { + tracing::error!( + "RPC call failed when notifying about new branch environment {}: {}", + created_env.id, + e + ); + } + } + } else { + tracing::warn!( + "No connected KubeManager for cluster {} - branch environment {} not registered with devbox-proxy", + cluster_id, + created_env.id + ); + } + } + + pub async fn create_branch_environment( + &self, + org_id: Uuid, + 
user_id: Uuid, + base_environment_id: Uuid, + name: String, + ) -> Result { + let name = name.trim(); + if name.is_empty() { + return Err(ApiError::InvalidRequest( + "Environment name cannot be empty".to_string(), + )); + } + let name = name.to_string(); + + // Validate base environment + let (base_environment, cluster, app_catalog) = self + .validate_base_environment(org_id, base_environment_id) + .await?; + + let branch_environment_id = Uuid::new_v4(); + let auth_token = rand_string(32); + let namespace = base_environment.namespace.clone(); + + // Create environment in database + let mut created_env = match self + .db + .create_kube_environment( + branch_environment_id, + org_id, + user_id, + base_environment.app_catalog_id, + base_environment.cluster_id, + name.clone(), + namespace.clone(), + KubeEnvironmentStatus::Creating.to_string(), + false, // Branch environments are always personal (not shared) + base_environment.catalog_sync_version, + Some(base_environment_id), // Set the base environment reference + Vec::new(), + HashMap::new(), + auth_token.clone(), + ) + .await + { + Ok(env) => env, + Err(db_err) => { + if let Some(sea_orm::SqlErr::UniqueConstraintViolation(constraint)) = + db_err.sql_err() + { + if constraint == "kube_environment_cluster_namespace_unique_idx" { + return Err(ApiError::InternalError( + "Namespace allocation conflict. 
Please retry branch environment creation.".to_string(), + )); + } + } + return Err(ApiError::from(anyhow::Error::from(db_err))); + } + }; + + // Notify kube-manager about the new branch environment + self.notify_branch_environment_creation( + base_environment_id, + base_environment.cluster_id, + &created_env, + ) + .await; + + self.update_environment_status(created_env.id, KubeEnvironmentStatus::Running, None, None) + .await?; + created_env.status = KubeEnvironmentStatus::Running.to_string(); + + // Build and return response + Ok(Self::build_environment_response( + created_env, + app_catalog.name, + cluster.name, + Some(base_environment.name), + Some(base_environment.catalog_sync_version), + base_environment.last_catalog_synced_at, + )) + } + + /// Authorize and validate that the environment can be synced from catalog + async fn authorize_and_validate_sync( + &self, + org_id: Uuid, + user_id: Uuid, + environment_id: Uuid, + ) -> Result< + ( + lapdev_db_entities::kube_environment::Model, + lapdev_db_entities::kube_app_catalog::Model, + ), + ApiError, + > { + let environment = self + .db + .get_kube_environment(environment_id) + .await + .map_err(ApiError::from)? + .ok_or_else(|| ApiError::InvalidRequest("Environment not found".to_string()))?; + + if environment.organization_id != org_id { + return Err(ApiError::Unauthorized); + } + + if !environment.is_shared && environment.user_id != user_id { + return Err(ApiError::Unauthorized); + } + + if environment.base_environment_id.is_some() { + return Err(ApiError::InvalidRequest( + "Branch environments must be rebased from their shared environment".to_string(), + )); + } + + let catalog = self + .db + .get_app_catalog(environment.app_catalog_id) + .await + .map_err(ApiError::from)? 
+ .ok_or_else(|| ApiError::InvalidRequest("App catalog not found".to_string()))?; + + if catalog.organization_id != org_id { + return Err(ApiError::Unauthorized); + } + + if catalog.sync_version == environment.catalog_sync_version { + return Err(ApiError::InvalidRequest( + "Environment is already up to date".to_string(), + )); + } + + Ok((environment, catalog)) + } + + /// Determine which workloads need to be synced by comparing sync versions + async fn determine_workloads_to_sync( + &self, + environment_id: Uuid, + catalog_id: Uuid, + ) -> Result< + ( + Vec, + Vec, + ), + ApiError, + > { + // Get existing environment workloads + let existing_workloads = self + .db + .get_environment_workloads(environment_id) + .await + .map_err(ApiError::from)?; + + // Build a map of existing workloads by name for quick lookup + let existing_workloads_map: std::collections::HashMap = existing_workloads + .into_iter() + .map(|w| (w.name.clone(), w)) + .collect(); + + let catalog_workloads = self + .db + .get_app_catalog_workloads(catalog_id) + .await + .map_err(ApiError::from)?; + + // Determine which workloads need to be deployed (added or updated) + let workloads_to_deploy: Vec<_> = catalog_workloads + .iter() + .filter(|catalog_workload| { + match existing_workloads_map.get(&catalog_workload.name) { + Some(existing_workload) => { + // Workload needs update if catalog version is newer + catalog_workload.catalog_sync_version + > existing_workload.catalog_sync_version + } + None => { + // New workload, needs to be deployed + true + } + } + }) + .cloned() + .collect(); + + Ok((catalog_workloads, workloads_to_deploy)) + } + + /// Deploy changed workloads to Kubernetes + async fn perform_workload_deployment( + &self, + environment: &lapdev_db_entities::kube_environment::Model, + catalog: &lapdev_db_entities::kube_app_catalog::Model, + workloads_to_deploy: &[lapdev_common::kube::KubeAppCatalogWorkload], + total_workloads: usize, + ) -> Result<(HashSet, Vec), ApiError> { + if 
workloads_to_deploy.is_empty() { + tracing::info!( + "No workload changes detected for environment {} - skipping K8s deployment", + environment.name + ); + return Ok((HashSet::new(), Vec::new())); + } + + tracing::info!( + "Syncing {}/{} workloads for environment {} (only changed ones)", + workloads_to_deploy.len(), + total_workloads, + environment.name + ); + + let target_server = self + .get_random_kube_cluster_server(environment.cluster_id) + .await + .ok_or_else(|| { + ApiError::InvalidRequest( + "No connected KubeManager for the environment target cluster".to_string(), + ) + })?; + + let manager_namespace = self + .db + .get_kube_cluster(catalog.cluster_id) + .await + .map_err(ApiError::from)? + .and_then(|cluster| cluster.manager_namespace); + + // Get workload YAML from database cache instead of querying Kubernetes + // Handles workloads from multiple namespaces + let (workloads_with_resources, workload_details) = self + .get_catalog_workloads_with_yaml_from_db( + catalog.cluster_id, + workloads_to_deploy.to_vec(), + &SidecarInjectionContext { + environment_id: environment.id, + namespace: &environment.namespace, + auth_token: &environment.auth_token, + manager_namespace: manager_namespace.as_deref(), + }, + ) + .await?; + + let service_names: HashSet = + workloads_with_resources.services.keys().cloned().collect(); + + self.deploy_environment_resources( + &target_server, + &environment, + workloads_with_resources, + None, + ) + .await?; + + Ok((service_names, workload_details)) + } + + /// Update environment workloads in the database after successful deployment + async fn update_environment_workloads_in_db( + &self, + environment_id: Uuid, + environment_namespace: &str, + catalog_workloads: &[lapdev_common::kube::KubeAppCatalogWorkload], + workloads_to_deploy: &[NewEnvironmentWorkload], + service_names: HashSet, + new_catalog_sync_version: i64, + ) -> Result, ApiError> { + let now = Utc::now().into(); + let txn = 
self.db.conn.begin().await.map_err(ApiError::from)?; + let mut replaced_workloads: Vec<(Uuid, Uuid)> = Vec::new(); + + // Build a set of catalog workload names for efficient lookup + let catalog_workload_names: HashSet = + catalog_workloads.iter().map(|w| w.name.clone()).collect(); + + // Only soft-delete workloads that no longer exist in the catalog + let deleted_workloads = lapdev_db_entities::kube_environment_workload::Entity::find() + .filter( + lapdev_db_entities::kube_environment_workload::Column::EnvironmentId + .eq(environment_id), + ) + .filter(lapdev_db_entities::kube_environment_workload::Column::DeletedAt.is_null()) + .all(&txn) + .await + .map_err(ApiError::from)?; + + for existing_workload in deleted_workloads { + if !catalog_workload_names.contains(&existing_workload.name) { + // This workload no longer exists in the catalog, soft-delete it + lapdev_db_entities::kube_environment_workload::ActiveModel { + id: ActiveValue::Set(existing_workload.id), + deleted_at: ActiveValue::Set(Some(now)), + ..Default::default() + } + .update(&txn) + .await + .map_err(ApiError::from)?; + } + } + + // Soft-delete services that are no longer in use + let mut service_delete_query = + lapdev_db_entities::kube_environment_service::Entity::update_many() + .filter( + lapdev_db_entities::kube_environment_service::Column::EnvironmentId + .eq(environment_id), + ) + .filter(lapdev_db_entities::kube_environment_service::Column::DeletedAt.is_null()); + + if !service_names.is_empty() { + let existing: Vec = service_names.into_iter().collect(); + service_delete_query = service_delete_query.filter( + lapdev_db_entities::kube_environment_service::Column::Name.is_not_in(existing), + ); + } + + service_delete_query + .col_expr( + lapdev_db_entities::kube_environment_service::Column::DeletedAt, + Expr::value(now), + ) + .exec(&txn) + .await + .map_err(ApiError::from)?; + + // Only insert/update workloads that were actually deployed + for workload in workloads_to_deploy { + let details = 
&workload.details; + let containers_json = + serde_json::to_value(&details.containers).unwrap_or_else(|_| serde_json::json!([])); + let ports_json = + serde_json::to_value(&details.ports).unwrap_or_else(|_| serde_json::json!([])); + let workload_yaml = details.workload_yaml.clone(); + + let existing_workload = lapdev_db_entities::kube_environment_workload::Entity::find() + .filter( + lapdev_db_entities::kube_environment_workload::Column::EnvironmentId + .eq(environment_id), + ) + .filter( + lapdev_db_entities::kube_environment_workload::Column::Name + .eq(details.name.clone()), + ) + .filter(lapdev_db_entities::kube_environment_workload::Column::DeletedAt.is_null()) + .one(&txn) + .await + .map_err(ApiError::from)?; + + // First, soft-delete any existing workload with the same name + lapdev_db_entities::kube_environment_workload::Entity::update_many() + .filter( + lapdev_db_entities::kube_environment_workload::Column::EnvironmentId + .eq(environment_id), + ) + .filter( + lapdev_db_entities::kube_environment_workload::Column::Name + .eq(details.name.clone()), + ) + .filter(lapdev_db_entities::kube_environment_workload::Column::DeletedAt.is_null()) + .col_expr( + lapdev_db_entities::kube_environment_workload::Column::DeletedAt, + Expr::value(now), + ) + .exec(&txn) + .await + .map_err(ApiError::from)?; + + // Then insert the new version + lapdev_db_entities::kube_environment_workload::ActiveModel { + id: ActiveValue::Set(workload.id), + created_at: ActiveValue::Set(now), + deleted_at: ActiveValue::Set(None), + environment_id: ActiveValue::Set(environment_id), + name: ActiveValue::Set(details.name.clone()), + namespace: ActiveValue::Set(environment_namespace.to_string()), + kind: ActiveValue::Set(details.kind.to_string()), + containers: ActiveValue::Set(containers_json), + ports: ActiveValue::Set(ports_json), + workload_yaml: ActiveValue::Set(workload_yaml), + catalog_sync_version: ActiveValue::Set(new_catalog_sync_version), + base_workload_id: 
ActiveValue::Set(details.base_workload_id), + ready_replicas: ActiveValue::Set(None), + } + .insert(&txn) + .await + .map_err(ApiError::from)?; + + if let Some(existing) = existing_workload { + replaced_workloads.push((existing.id, workload.id)); + } + } + + // Update environment's catalog sync version and timestamp + lapdev_db_entities::kube_environment::ActiveModel { + id: ActiveValue::Set(environment_id), + catalog_sync_version: ActiveValue::Set(new_catalog_sync_version), + last_catalog_synced_at: ActiveValue::Set(Some(now)), + sync_status: ActiveValue::Set(KubeEnvironmentSyncStatus::Idle.to_string()), + ..Default::default() + } + .update(&txn) + .await + .map_err(ApiError::from)?; + + txn.commit().await.map_err(ApiError::from)?; + + Ok(replaced_workloads) + } + + pub async fn sync_environment_from_catalog( + &self, + org_id: Uuid, + user_id: Uuid, + environment_id: Uuid, + ) -> Result<(), ApiError> { + // Authorize and validate that sync is needed + let (environment, catalog) = match self + .authorize_and_validate_sync(org_id, user_id, environment_id) + .await + { + Ok(result) => result, + Err(ApiError::InvalidRequest(msg)) if msg == "Environment is already up to date" => { + return Ok(()) + } + Err(e) => return Err(e), + }; + + // Mark environment as syncing + lapdev_db_entities::kube_environment::ActiveModel { + id: ActiveValue::Set(environment.id), + sync_status: ActiveValue::Set(KubeEnvironmentSyncStatus::Syncing.to_string()), + ..Default::default() + } + .update(&self.db.conn) + .await + .map_err(ApiError::from)?; + + // Perform sync operations; reset status to idle on error + let sync_result = async { + // Determine which workloads need syncing + let (catalog_workloads, workloads_to_deploy) = self + .determine_workloads_to_sync(environment.id, catalog.id) + .await?; + + // Deploy changed workloads to Kubernetes + let (service_names, workload_details) = self + .perform_workload_deployment( + &environment, + &catalog, + &workloads_to_deploy, + 
catalog_workloads.len(), + ) + .await?; + + Ok::<_, ApiError>((catalog_workloads, workload_details, service_names)) + } + .await; + + // Handle sync result - reset status on failure + let (catalog_workloads, workload_details, service_names) = match sync_result { + Ok(result) => result, + Err(e) => { + // Reset sync_status to idle on failure + let _ = lapdev_db_entities::kube_environment::ActiveModel { + id: ActiveValue::Set(environment.id), + sync_status: ActiveValue::Set(KubeEnvironmentSyncStatus::Idle.to_string()), + ..Default::default() + } + .update(&self.db.conn) + .await; + return Err(e); + } + }; + + // Update database with synced workloads + let replaced_workloads = self + .update_environment_workloads_in_db( + environment.id, + &environment.namespace, + &catalog_workloads, + &workload_details, + service_names, + catalog.sync_version, + ) + .await?; + + self.relink_branch_overrides(&environment, &replaced_workloads, catalog.sync_version) + .await?; + + Ok(()) + } + + pub async fn rebase_branch_environment( + &self, + org_id: Uuid, + user_id: Uuid, + environment_id: Uuid, + ) -> Result<(), ApiError> { + let environment = self + .db + .get_kube_environment(environment_id) + .await + .map_err(ApiError::from)? + .ok_or_else(|| ApiError::InvalidRequest("Environment not found".to_string()))?; + + if environment.organization_id != org_id { + return Err(ApiError::Unauthorized); + } + + if environment.user_id != user_id { + return Err(ApiError::Unauthorized); + } + + let base_environment_id = environment.base_environment_id.ok_or_else(|| { + ApiError::InvalidRequest( + "Only branch environments can be rebased from a shared environment".to_string(), + ) + })?; + + let base_environment = self + .db + .get_kube_environment(base_environment_id) + .await + .map_err(ApiError::from)? 
+ .ok_or_else(|| ApiError::InvalidRequest("Base environment not found".to_string()))?; + + if base_environment.organization_id != org_id { + return Err(ApiError::Unauthorized); + } + + lapdev_db_entities::kube_environment::ActiveModel { + id: ActiveValue::Set(environment.id), + sync_status: ActiveValue::Set(KubeEnvironmentSyncStatus::Syncing.to_string()), + ..Default::default() + } + .update(&self.db.conn) + .await + .map_err(ApiError::from)?; + + let rebase_result = self + .reapply_branch_overrides_from_base(&environment, &base_environment) + .await; + + if let Err(err) = rebase_result { + let _ = lapdev_db_entities::kube_environment::ActiveModel { + id: ActiveValue::Set(environment.id), + sync_status: ActiveValue::Set(KubeEnvironmentSyncStatus::Idle.to_string()), + ..Default::default() + } + .update(&self.db.conn) + .await; + return Err(err); + } + + let now = Utc::now().into(); + lapdev_db_entities::kube_environment::ActiveModel { + id: ActiveValue::Set(environment.id), + catalog_sync_version: ActiveValue::Set(base_environment.catalog_sync_version), + last_catalog_synced_at: ActiveValue::Set(Some(now)), + sync_status: ActiveValue::Set(KubeEnvironmentSyncStatus::Idle.to_string()), + ..Default::default() + } + .update(&self.db.conn) + .await + .map_err(ApiError::from)?; + + Ok(()) + } + + // Kube Namespace operations + pub async fn create_kube_namespace( + &self, + org_id: Uuid, + user_id: Uuid, + name: String, + description: Option, + is_shared: bool, + ) -> Result { + let namespace = self + .db + .create_kube_namespace(org_id, user_id, name, description, is_shared) + .await + .map_err(ApiError::from)?; + + Ok(lapdev_common::kube::KubeNamespace { + id: namespace.id, + name: namespace.name, + description: namespace.description, + is_shared: namespace.is_shared, + created_at: namespace.created_at, + created_by: namespace.user_id, + }) + } + + pub async fn get_all_kube_namespaces( + &self, + org_id: Uuid, + user_id: Uuid, + is_shared: bool, + ) -> Result, ApiError> 
{ + let namespaces = self + .db + .get_all_kube_namespaces(org_id, user_id, is_shared) + .await + .map_err(ApiError::from)?; + + let kube_namespaces = namespaces + .into_iter() + .map(|ns| lapdev_common::kube::KubeNamespace { + id: ns.id, + name: ns.name, + description: ns.description, + is_shared: ns.is_shared, + created_at: ns.created_at, + created_by: ns.user_id, + }) + .collect(); + + Ok(kube_namespaces) + } + + pub async fn delete_kube_namespace( + &self, + _org_id: Uuid, + namespace_id: Uuid, + ) -> Result<(), ApiError> { + self.db + .delete_kube_namespace(namespace_id) + .await + .map_err(ApiError::from)?; + + Ok(()) + } + + async fn publish_environment_event(&self, event: EnvironmentLifecycleEvent) { + match serde_json::to_string(&event) { + Ok(payload) => { + if let Some(pool) = self.db.pool.clone() { + if let Err(err) = sqlx::query("SELECT pg_notify('environment_lifecycle', $1)") + .bind(payload) + .execute(&pool) + .await + { + warn!( + error = %err, + "failed to publish environment lifecycle event via NOTIFY" + ); + let _ = self.environment_events.send(event); + } + } else { + warn!("pg pool unavailable; broadcasting environment event locally"); + let _ = self.environment_events.send(event); + } + } + Err(err) => { + warn!( + error = %err, + "failed to serialize environment lifecycle event" + ); + } + } + } + + async fn update_environment_status( + &self, + environment_id: Uuid, + status: KubeEnvironmentStatus, + paused_at: Option>>, + resumed_at: Option>>, + ) -> Result<(), ApiError> { + let paused_value = match paused_at { + Some(Some(dt)) => ActiveValue::Set(Some(dt.into())), + Some(None) => ActiveValue::Set(None), + None => ActiveValue::NotSet, + }; + + let resumed_value = match resumed_at { + Some(Some(dt)) => ActiveValue::Set(Some(dt.into())), + Some(None) => ActiveValue::Set(None), + None => ActiveValue::NotSet, + }; + + lapdev_db_entities::kube_environment::ActiveModel { + id: ActiveValue::Set(environment_id), + status: 
ActiveValue::Set(status.to_string()), + paused_at: paused_value, + resumed_at: resumed_value, + ..Default::default() + } + .update(&self.db.conn) + .await + .map_err(ApiError::from)?; + + if let Ok(Some(environment)) = self.db.get_kube_environment(environment_id).await { + let event = EnvironmentLifecycleEvent { + organization_id: environment.organization_id, + environment_id, + status: status.clone(), + paused_at: environment.paused_at.map(|dt| dt.to_string()), + resumed_at: environment.resumed_at.map(|dt| dt.to_string()), + updated_at: Utc::now(), + }; + + self.publish_environment_event(event).await; + } + + Ok(()) + } + + async fn run_pause_environment(&self, environment_id: Uuid) -> Result<(), ApiError> { + let environment = self + .db + .get_kube_environment(environment_id) + .await + .map_err(ApiError::from)? + .ok_or_else(|| ApiError::InvalidRequest("Environment not found".to_string()))?; + + let server = self + .get_random_kube_cluster_server(environment.cluster_id) + .await + .ok_or_else(|| { + ApiError::InvalidRequest( + "No connected KubeManager for the environment target cluster".to_string(), + ) + })?; + + let mut workloads_with_resources = + self.assemble_environment_workloads(&environment).await?; + + for workload in &mut workloads_with_resources.workloads { + let result = match workload { + KubeWorkloadYamlOnly::CronJob(_) => set_cronjob_suspend(workload, true), + KubeWorkloadYamlOnly::Deployment(_) + | KubeWorkloadYamlOnly::StatefulSet(_) + | KubeWorkloadYamlOnly::ReplicaSet(_) => set_workload_replicas(workload, 0), + KubeWorkloadYamlOnly::DaemonSet(_) => set_daemonset_paused(workload, true), + KubeWorkloadYamlOnly::Pod(_) => { + warn!( + environment_id = %environment.id, + "Pod workloads cannot be gracefully paused; they will continue running" + ); + Ok(()) + } + _ => Ok(()), + }; + if let Err(err) = result { + warn!( + environment_id = %environment.id, + error = ?err, + "Failed to update workload YAML while pausing environment" + ); + } + } + + 
self.deploy_environment_resources(&server, &environment, workloads_with_resources, None) + .await?; + + if let Some(base_env_id) = environment.base_environment_id { + if let Err(err) = self + .notify_branch_environment_deletion(&server.rpc_client, base_env_id, environment.id) + .await + { + warn!( + environment_id = %environment.id, + error = ?err, + "Failed to notify devbox proxy about branch environment pause" + ); + } + } + + self.update_environment_status( + environment.id, + KubeEnvironmentStatus::Paused, + Some(Some(Utc::now())), + Some(None), + ) + .await + } + + async fn run_resume_environment(&self, environment_id: Uuid) -> Result<(), ApiError> { + let environment = self + .db + .get_kube_environment(environment_id) + .await + .map_err(ApiError::from)? + .ok_or_else(|| ApiError::InvalidRequest("Environment not found".to_string()))?; + + let server = self + .get_random_kube_cluster_server(environment.cluster_id) + .await + .ok_or_else(|| { + ApiError::InvalidRequest( + "No connected KubeManager for the environment target cluster".to_string(), + ) + })?; + + let mut workloads_with_resources = + self.assemble_environment_workloads(&environment).await?; + + for workload in &mut workloads_with_resources.workloads { + let result = match workload { + KubeWorkloadYamlOnly::CronJob(_) => set_cronjob_suspend(workload, false), + KubeWorkloadYamlOnly::Deployment(_) + | KubeWorkloadYamlOnly::StatefulSet(_) + | KubeWorkloadYamlOnly::ReplicaSet(_) => set_workload_replicas(workload, 1), + KubeWorkloadYamlOnly::DaemonSet(_) => set_daemonset_paused(workload, false), + KubeWorkloadYamlOnly::Pod(_) => Ok(()), + _ => Ok(()), + }; + if let Err(err) = result { + warn!( + environment_id = %environment.id, + error = ?err, + "Failed to update workload YAML while resuming environment" + ); + } + } + + self.deploy_environment_resources(&server, &environment, workloads_with_resources, None) + .await?; + + if let Some(base_env_id) = environment.base_environment_id { + 
self.notify_branch_environment_creation( + base_env_id, + environment.cluster_id, + &environment, + ) + .await; + } + + self.update_environment_status( + environment.id, + KubeEnvironmentStatus::Running, + None, + Some(Some(Utc::now())), + ) + .await + } +} diff --git a/crates/api/src/kube_controller/mod.rs b/crates/api/src/kube_controller/mod.rs new file mode 100644 index 0000000..0e08327 --- /dev/null +++ b/crates/api/src/kube_controller/mod.rs @@ -0,0 +1,362 @@ +use std::{ + collections::{BTreeMap, HashMap}, + net::SocketAddr, + sync::{atomic::AtomicU64, Arc}, + time::{Duration, Instant}, +}; +use tokio::sync::{broadcast, RwLock}; +use uuid::Uuid; + +use crate::environment_events::EnvironmentLifecycleEvent; +use lapdev_common::devbox::DirectTunnelConfig; +use lapdev_db::api::DbApi; +use lapdev_kube::{server::KubeClusterServer, tunnel::TunnelRegistry}; +use lapdev_kube_rpc::{DevboxRouteConfig, ProxyBranchRouteConfig}; +use tracing::{debug, warn}; + +// Submodules +mod app_catalog; +mod cluster; +pub(crate) mod container_images; +mod deployment; +mod environment; +mod preview_url; +mod resources; +mod service; +pub mod validation; +mod workload; +pub mod yaml_parser; + +// Re-exports +pub use self::container_images::CONTAINER_IMAGE_TAG; +pub use validation::*; +pub use yaml_parser::*; + +pub(crate) enum EnvironmentNamespaceKind { + Personal, + Shared, + Branch, +} + +#[derive(Clone)] +pub struct KubeController { + // KubeManager connections per cluster + pub kube_cluster_servers: Arc>>>, + pub kube_cluster_server_generation: Arc, + // Tunnel registry for preview URL functionality + pub tunnel_registry: Arc, + // Database API + pub db: DbApi, + // Broadcast channel for environment lifecycle events + pub environment_events: broadcast::Sender, +} + +impl KubeController { + const KUBE_MANAGER_RPC_TIMEOUT_SECS: u64 = 300; + + fn kube_manager_rpc_context_with_timeout(timeout: Duration) -> tarpc::context::Context { + let mut ctx = tarpc::context::current(); + 
ctx.deadline = Instant::now() + timeout; + ctx + } + + fn kube_manager_rpc_context() -> tarpc::context::Context { + Self::kube_manager_rpc_context_with_timeout(Duration::from_secs( + Self::KUBE_MANAGER_RPC_TIMEOUT_SECS, + )) + } + + pub fn new( + db: DbApi, + environment_events: broadcast::Sender, + ) -> Self { + Self { + kube_cluster_servers: Arc::new(RwLock::new(HashMap::new())), + kube_cluster_server_generation: Arc::new(AtomicU64::new(1)), + tunnel_registry: Arc::new(TunnelRegistry::new()), + db, + environment_events, + } + } + + pub async fn get_random_kube_cluster_server( + &self, + cluster_id: Uuid, + ) -> Option { + let servers = self.kube_cluster_servers.read().await; + servers + .get(&cluster_id)? + .iter() + .next_back() + .map(|(_, server)| server.clone()) + } + + pub async fn refresh_cluster_namespace_watches(&self, cluster_id: Uuid) { + let namespaces = match self.db.get_cluster_watch_namespaces(cluster_id).await { + Ok(namespaces) => namespaces, + Err(err) => { + warn!( + cluster_id = %cluster_id, + error = ?err, + "Failed to load namespaces for watch configuration" + ); + return; + } + }; + + let servers = self.kube_cluster_servers.read().await; + let Some(cluster_servers) = servers.get(&cluster_id) else { + debug!( + cluster_id = %cluster_id, + "No connected KubeManager instances; skipping namespace watch refresh" + ); + return; + }; + + for server in cluster_servers.values() { + if let Err(err) = server + .send_namespace_watch_configuration(namespaces.clone()) + .await + { + warn!( + cluster_id = %cluster_id, + error = ?err, + "Failed to send namespace watch configuration to KubeManager" + ); + } + } + } + + pub async fn set_devbox_routes( + &self, + cluster_id: Uuid, + environment_id: Uuid, + routes: HashMap, + ) -> Result<(), String> { + let servers = self.kube_cluster_servers.read().await; + let Some(cluster_servers) = servers.get(&cluster_id) else { + return Err(format!( + "No connected KubeManager instances for cluster {}", + cluster_id + )); 
+ }; + + let mut last_err: Option = None; + for server in cluster_servers.values() { + match server + .set_devbox_routes(environment_id, routes.clone()) + .await + { + Ok(()) => return Ok(()), + Err(err) => last_err = Some(err), + } + } + + Err(last_err.unwrap_or_else(|| { + "Failed to dispatch set_devbox_routes to any KubeManager instances".to_string() + })) + } + + pub async fn set_devbox_route( + &self, + cluster_id: Uuid, + environment_id: Uuid, + route: DevboxRouteConfig, + ) -> Result<(), String> { + let servers = self.kube_cluster_servers.read().await; + let Some(cluster_servers) = servers.get(&cluster_id) else { + return Err(format!( + "No connected KubeManager instances for cluster {}", + cluster_id + )); + }; + + let mut last_err: Option = None; + for server in cluster_servers.values() { + match server.set_devbox_route(environment_id, route.clone()).await { + Ok(()) => return Ok(()), + Err(err) => last_err = Some(err), + } + } + + Err(last_err.unwrap_or_else(|| { + "Failed to dispatch set_devbox_route to any KubeManager instances".to_string() + })) + } + + pub async fn remove_devbox_route( + &self, + cluster_id: Uuid, + environment_id: Uuid, + workload_id: Uuid, + branch_environment_id: Option, + ) -> Result<(), String> { + let servers = self.kube_cluster_servers.read().await; + let Some(cluster_servers) = servers.get(&cluster_id) else { + return Err(format!( + "No connected KubeManager instances for cluster {}", + cluster_id + )); + }; + + let mut last_err: Option = None; + for server in cluster_servers.values() { + match server + .remove_devbox_route(environment_id, workload_id, branch_environment_id) + .await + { + Ok(()) => return Ok(()), + Err(err) => last_err = Some(err), + } + } + + Err(last_err.unwrap_or_else(|| { + "Failed to dispatch remove_devbox_route to any KubeManager instances".to_string() + })) + } + + pub async fn clear_devbox_routes( + &self, + cluster_id: Uuid, + environment_id: Uuid, + branch_environment_id: Option, + ) -> Result<(), 
String> { + let servers = self.kube_cluster_servers.read().await; + let Some(cluster_servers) = servers.get(&cluster_id) else { + return Err(format!( + "No connected KubeManager instances for cluster {}", + cluster_id + )); + }; + + let mut last_err: Option = None; + for server in cluster_servers.values() { + match server + .clear_devbox_routes(environment_id, branch_environment_id) + .await + { + Ok(()) => return Ok(()), + Err(err) => last_err = Some(err), + } + } + + Err(last_err.unwrap_or_else(|| { + "Failed to dispatch clear_devbox_routes to any KubeManager instances".to_string() + })) + } + + pub async fn request_devbox_direct_config( + &self, + cluster_id: Uuid, + user_id: Uuid, + environment_id: Uuid, + namespace: String, + stun_observed_addr: Option, + ) -> Result, String> { + let servers = self.kube_cluster_servers.read().await; + let Some(cluster_servers) = servers.get(&cluster_id) else { + return Err(format!( + "No connected KubeManager instances for cluster {}", + cluster_id + )); + }; + + let mut last_err: Option = None; + for server in cluster_servers.values() { + match server + .get_devbox_direct_config( + user_id, + environment_id, + namespace.clone(), + stun_observed_addr, + ) + .await + { + Ok(config) => return Ok(config), + Err(err) => last_err = Some(err), + } + } + + Err(last_err.unwrap_or_else(|| { + "Failed to dispatch get_devbox_direct_config to any KubeManager instances".to_string() + })) + } + + pub async fn update_branch_service_route( + &self, + cluster_id: Uuid, + base_environment_id: Uuid, + workload_id: Uuid, + route: ProxyBranchRouteConfig, + ) -> Result<(), String> { + let servers = self.kube_cluster_servers.read().await; + let Some(cluster_servers) = servers.get(&cluster_id) else { + return Err(format!( + "No connected KubeManager instances for cluster {}", + cluster_id + )); + }; + + let mut last_err: Option = None; + for server in cluster_servers.values() { + match server + .rpc_client + .update_branch_service_route( + 
Self::kube_manager_rpc_context(), + base_environment_id, + workload_id, + route.clone(), + ) + .await + { + Ok(Ok(())) => return Ok(()), + Ok(Err(err)) => last_err = Some(err), + Err(err) => last_err = Some(format!("{}", err)), + } + } + + Err(last_err.unwrap_or_else(|| { + "Failed to dispatch update_branch_service_route to any KubeManager instances" + .to_string() + })) + } + + pub async fn remove_branch_service_route( + &self, + cluster_id: Uuid, + base_environment_id: Uuid, + workload_id: Uuid, + branch_environment_id: Uuid, + ) -> Result<(), String> { + let servers = self.kube_cluster_servers.read().await; + let Some(cluster_servers) = servers.get(&cluster_id) else { + return Err(format!( + "No connected KubeManager instances for cluster {}", + cluster_id + )); + }; + + let mut last_err: Option = None; + for server in cluster_servers.values() { + match server + .rpc_client + .remove_branch_service_route( + Self::kube_manager_rpc_context(), + base_environment_id, + workload_id, + branch_environment_id, + ) + .await + { + Ok(Ok(())) => return Ok(()), + Ok(Err(err)) => last_err = Some(err), + Err(err) => last_err = Some(format!("{}", err)), + } + } + + Err(last_err.unwrap_or_else(|| { + "Failed to dispatch remove_branch_service_route to any KubeManager instances" + .to_string() + })) + } +} diff --git a/crates/api/src/kube_controller/preview_url.rs b/crates/api/src/kube_controller/preview_url.rs new file mode 100644 index 0000000..4108faa --- /dev/null +++ b/crates/api/src/kube_controller/preview_url.rs @@ -0,0 +1,215 @@ +use uuid::Uuid; + +use lapdev_common::{kube::KubeEnvironmentPreviewUrl, utils::rand_string, LAPDEV_API_HOST}; +use lapdev_rpc::error::ApiError; + +use super::KubeController; + +impl KubeController { + pub async fn create_environment_preview_url( + &self, + user_id: Uuid, + environment: lapdev_db_entities::kube_environment::Model, + request: lapdev_common::kube::CreateKubeEnvironmentPreviewUrlRequest, + ) -> Result { + // Verify service exists and 
belongs to the environment (or its base environment) + let service = self + .db + .get_kube_environment_service_by_id(request.service_id) + .await + .map_err(ApiError::from)? + .ok_or_else(|| ApiError::InvalidRequest("Service not found".to_string()))?; + + if let Some(base_environment_id) = environment.base_environment_id { + // Branch environment: service must belong to the base environment + if service.environment_id != base_environment_id { + return Err(ApiError::InvalidRequest( + "Service not found in base environment".to_string(), + )); + } + } else if service.environment_id != environment.id { + return Err(ApiError::InvalidRequest( + "Service not found in environment".to_string(), + )); + } + + // Validate the port exists on the service + let port_exists = service.ports.iter().any(|p| { + p.port == request.port + && (request.port_name.is_none() || p.name.as_ref() == request.port_name.as_ref()) + }); + + if !port_exists { + return Err(ApiError::InvalidRequest( + "Specified port does not exist on the service".to_string(), + )); + } + + // Auto-generate name based on service and port + let auto_name = format!("{}-{}-{}", request.port, service.name, rand_string(12)); + + // Set defaults + let protocol = request.protocol.unwrap_or_else(|| "HTTP".to_string()); + let access_level = request + .access_level + .unwrap_or(lapdev_common::kube::PreviewUrlAccessLevel::Organization); + + let url = format!("https://{auto_name}.{}", LAPDEV_API_HOST); + + // Create preview URL in database + let preview_url = match self + .db + .create_environment_preview_url( + environment.id, + request.service_id, + user_id, + auto_name, + request.description, + request.port, + request.port_name, + protocol.clone(), + access_level.clone(), + ) + .await + { + Ok(url) => url, + Err(db_err) => { + // Check if this is a unique constraint violation + if matches!( + db_err.sql_err(), + Some(sea_orm::SqlErr::UniqueConstraintViolation(_)) + ) { + return Err(ApiError::InvalidRequest( + "A preview URL 
already exists for this service and port combination" + .to_string(), + )); + } else { + return Err(ApiError::from(anyhow::Error::from(db_err))); + } + } + }; + + Ok(KubeEnvironmentPreviewUrl { + id: preview_url.id, + created_at: preview_url.created_at, + environment_id: preview_url.environment_id, + service_id: preview_url.service_id, + name: preview_url.name, + description: preview_url.description, + port: preview_url.port, + port_name: preview_url.port_name, + protocol: preview_url.protocol, + access_level: preview_url + .access_level + .parse() + .unwrap_or(lapdev_common::kube::PreviewUrlAccessLevel::Organization), + created_by: preview_url.created_by, + last_accessed_at: preview_url.last_accessed_at, + url, + }) + } + + pub async fn get_environment_preview_urls( + &self, + org_id: Uuid, + user_id: Uuid, + environment_id: Uuid, + ) -> Result, ApiError> { + // Verify environment belongs to the organization and check ownership + let environment = self + .db + .get_kube_environment(environment_id) + .await + .map_err(ApiError::from)? 
+ .ok_or_else(|| ApiError::InvalidRequest("Environment not found".to_string()))?; + + if environment.organization_id != org_id { + return Err(ApiError::Unauthorized); + } + + // If it's a personal environment, check ownership + if !environment.is_shared && environment.user_id != user_id { + return Err(ApiError::Unauthorized); + } + + let preview_urls = self + .db + .get_environment_preview_urls(environment_id) + .await + .map_err(ApiError::from)?; + + Ok(preview_urls + .into_iter() + .map(|preview_url| { + let url = format!("https://{}.{}", preview_url.name, LAPDEV_API_HOST); + + KubeEnvironmentPreviewUrl { + id: preview_url.id, + created_at: preview_url.created_at, + environment_id: preview_url.environment_id, + service_id: preview_url.service_id, + name: preview_url.name, + description: preview_url.description, + port: preview_url.port, + port_name: preview_url.port_name, + protocol: preview_url.protocol, + access_level: preview_url + .access_level + .parse() + .unwrap_or(lapdev_common::kube::PreviewUrlAccessLevel::Organization), + created_by: preview_url.created_by, + last_accessed_at: preview_url.last_accessed_at, + url, + } + }) + .collect()) + } + + pub async fn update_environment_preview_url( + &self, + preview_url_id: Uuid, + request: lapdev_common::kube::UpdateKubeEnvironmentPreviewUrlRequest, + ) -> Result { + // Update the preview URL + let updated_preview_url = self + .db + .update_environment_preview_url( + preview_url_id, + request.description, + request.access_level, + ) + .await + .map_err(ApiError::from)?; + + let url = format!("https://{}.{}", updated_preview_url.name, LAPDEV_API_HOST); + + Ok(KubeEnvironmentPreviewUrl { + id: updated_preview_url.id, + created_at: updated_preview_url.created_at, + environment_id: updated_preview_url.environment_id, + service_id: updated_preview_url.service_id, + name: updated_preview_url.name, + description: updated_preview_url.description, + port: updated_preview_url.port, + port_name: 
updated_preview_url.port_name, + protocol: updated_preview_url.protocol, + access_level: updated_preview_url + .access_level + .parse() + .unwrap_or(lapdev_common::kube::PreviewUrlAccessLevel::Organization), + created_by: updated_preview_url.created_by, + last_accessed_at: updated_preview_url.last_accessed_at, + url, + }) + } + + pub async fn delete_environment_preview_url( + &self, + preview_url_id: Uuid, + ) -> Result<(), ApiError> { + self.db + .delete_environment_preview_url(preview_url_id) + .await + .map_err(ApiError::from) + } +} diff --git a/crates/api/src/kube_controller/resource_cleaner.rs b/crates/api/src/kube_controller/resource_cleaner.rs new file mode 100644 index 0000000..122a8fa --- /dev/null +++ b/crates/api/src/kube_controller/resource_cleaner.rs @@ -0,0 +1,30 @@ +use k8s_openapi::api::core::v1::{ConfigMap, Secret}; + +pub fn clean_configmap(configmap: ConfigMap) -> ConfigMap { + ConfigMap { + metadata: clean_metadata(configmap.metadata), + data: configmap.data, + binary_data: configmap.binary_data, + immutable: configmap.immutable, + } +} + +pub fn clean_secret(secret: Secret) -> Secret { + Secret { + metadata: clean_metadata(secret.metadata), + data: secret.data, + string_data: secret.string_data, + type_: secret.type_, + immutable: secret.immutable, + } +} + +fn clean_metadata( + metadata: k8s_openapi::apimachinery::pkg::apis::meta::v1::ObjectMeta, +) -> k8s_openapi::apimachinery::pkg::apis::meta::v1::ObjectMeta { + k8s_openapi::apimachinery::pkg::apis::meta::v1::ObjectMeta { + name: metadata.name, + labels: metadata.labels, + ..Default::default() + } +} diff --git a/crates/api/src/kube_controller/resources.rs b/crates/api/src/kube_controller/resources.rs new file mode 100644 index 0000000..b0c228e --- /dev/null +++ b/crates/api/src/kube_controller/resources.rs @@ -0,0 +1,955 @@ +use std::collections::HashMap; + +use anyhow::{anyhow, Result}; +use k8s_openapi::{ + api::{ + apps::v1::{ + DaemonSet, DaemonSetSpec, Deployment, DeploymentSpec, 
ReplicaSet, ReplicaSetSpec, + StatefulSet, StatefulSetSpec, + }, + batch::v1::{CronJob, CronJobSpec, Job, JobSpec}, + core::v1::{ + ConfigMap, Container, ContainerPort, EnvVar, EnvVarSource, ObjectFieldSelector, Pod, + PodSpec, PodTemplateSpec, Secret, Service, + }, + }, + apimachinery::pkg::{api::resource::Quantity, apis::meta::v1::ObjectMeta}, +}; +use lapdev_common::kube::{ + KubeContainerImage, KubeContainerInfo, KubeWorkloadKind, ProxyPortRoute, + DEFAULT_SIDECAR_PROXY_BIND_ADDR, DEFAULT_SIDECAR_PROXY_METRICS_PORT, + DEFAULT_SIDECAR_PROXY_PORT, SIDECAR_PROXY_BIND_ADDR_ENV_VAR, + SIDECAR_PROXY_MANAGER_ADDR_ENV_VAR, SIDECAR_PROXY_PORT_ENV_VAR, SIDECAR_PROXY_WORKLOAD_ENV_VAR, +}; +use lapdev_kube_rpc::KubeWorkloadYamlOnly; +use serde_json; +use uuid::Uuid; + +use super::container_images; + +#[derive(Debug)] +pub struct SidecarInjectionOptions<'a> { + pub environment_id: Uuid, + pub workload_id: Uuid, + pub namespace: &'a str, + pub auth_token: &'a str, + pub manager_namespace: Option<&'a str>, + pub proxy_routes: &'a [ProxyPortRoute], +} + +/// Rebuild workload YAML so it reflects the latest container configuration while +/// stripping server-managed metadata. Mirrors kube-manager's clean_* helpers. +pub fn rebuild_workload_yaml( + kind: &KubeWorkloadKind, + yaml: &str, + containers: &[KubeContainerInfo], + sidecar: Option<&SidecarInjectionOptions<'_>>, +) -> Result { + match kind { + KubeWorkloadKind::Deployment => { + let deployment: Deployment = serde_yaml::from_str(yaml)?; + let mut cleaned = clean_deployment(deployment, containers); + if let Some(options) = sidecar { + inject_sidecar_proxy_into_deployment(&mut cleaned, options)?; + } + Ok(serde_yaml::to_string(&cleaned)?) + } + KubeWorkloadKind::StatefulSet => { + let statefulset: StatefulSet = serde_yaml::from_str(yaml)?; + let cleaned = clean_statefulset(statefulset, containers); + Ok(serde_yaml::to_string(&cleaned)?) 
+ } + KubeWorkloadKind::DaemonSet => { + let daemonset: DaemonSet = serde_yaml::from_str(yaml)?; + let cleaned = clean_daemonset(daemonset, containers); + Ok(serde_yaml::to_string(&cleaned)?) + } + KubeWorkloadKind::ReplicaSet => { + let replicaset: ReplicaSet = serde_yaml::from_str(yaml)?; + let cleaned = clean_replicaset(replicaset, containers); + Ok(serde_yaml::to_string(&cleaned)?) + } + KubeWorkloadKind::Pod => { + let pod: Pod = serde_yaml::from_str(yaml)?; + let cleaned = clean_pod(pod, containers); + Ok(serde_yaml::to_string(&cleaned)?) + } + KubeWorkloadKind::Job => { + let job: Job = serde_yaml::from_str(yaml)?; + let cleaned = clean_job(job, containers); + Ok(serde_yaml::to_string(&cleaned)?) + } + KubeWorkloadKind::CronJob => { + let cronjob: CronJob = serde_yaml::from_str(yaml)?; + let cleaned = clean_cronjob(cronjob, containers); + Ok(serde_yaml::to_string(&cleaned)?) + } + } +} + +pub fn inject_sidecar_proxy_into_deployment( + deployment: &mut Deployment, + options: &SidecarInjectionOptions<'_>, +) -> Result<()> { + let spec = match deployment.spec.as_mut() { + Some(spec) => spec, + None => return Err(anyhow!("Deployment spec missing for sidecar injection")), + }; + + let pod_spec = spec + .template + .spec + .as_mut() + .ok_or_else(|| anyhow!("Deployment pod spec missing for sidecar injection"))?; + + let routes_json = if options.proxy_routes.is_empty() { + None + } else { + Some(serde_json::to_string(options.proxy_routes)?) 
+ }; + + let sidecar_index = ensure_sidecar_proxy_container( + pod_spec, + options.environment_id, + options.workload_id, + options.namespace, + options.auth_token, + options.manager_namespace, + routes_json.as_deref(), + ); + + if let (Some(json), Some(container)) = (routes_json, pod_spec.containers.get_mut(sidecar_index)) + { + upsert_env_var( + container, + "LAPDEV_SIDECAR_PROXY_PORT_ROUTES", + json.to_string(), + ); + } + + Ok(()) +} + +#[allow(dead_code)] +pub fn clean_configmap(configmap: ConfigMap) -> ConfigMap { + ConfigMap { + metadata: clean_metadata(configmap.metadata), + data: configmap.data, + binary_data: configmap.binary_data, + immutable: configmap.immutable, + } +} + +#[allow(dead_code)] +pub fn clean_secret(secret: Secret) -> Secret { + Secret { + metadata: clean_metadata(secret.metadata), + data: secret.data, + string_data: secret.string_data, + type_: secret.type_, + immutable: secret.immutable, + } +} + +#[allow(dead_code)] +pub fn clean_service(service: Service) -> Service { + let clean_spec = service.spec.map(|mut original_spec| { + original_spec.cluster_ip = None; + original_spec.cluster_ips = None; + original_spec.health_check_node_port = None; + + if let Some(ports) = original_spec.ports.as_mut() { + for port in ports { + port.node_port = None; + } + } + + original_spec + }); + + Service { + metadata: clean_metadata(service.metadata), + spec: clean_spec, + status: None, + } +} + +fn clean_metadata(metadata: ObjectMeta) -> ObjectMeta { + ObjectMeta { + name: metadata.name, + labels: metadata.labels, + ..Default::default() + } +} + +fn clean_deployment( + deployment: Deployment, + workload_containers: &[KubeContainerInfo], +) -> Deployment { + let clean_spec = deployment.spec.map(|original_spec| { + let template = merge_template_containers(original_spec.template, workload_containers); + + DeploymentSpec { + replicas: Some(1), + selector: original_spec.selector, + template, + min_ready_seconds: original_spec.min_ready_seconds, + paused: 
original_spec.paused, + progress_deadline_seconds: original_spec.progress_deadline_seconds, + revision_history_limit: original_spec.revision_history_limit, + strategy: original_spec.strategy, + } + }); + + Deployment { + metadata: clean_metadata(deployment.metadata), + spec: clean_spec, + status: None, + } +} + +fn clean_statefulset( + statefulset: StatefulSet, + workload_containers: &[KubeContainerInfo], +) -> StatefulSet { + let clean_spec = statefulset.spec.map(|original_spec| { + let template = merge_template_containers(original_spec.template, workload_containers); + + StatefulSetSpec { + service_name: original_spec.service_name, + replicas: Some(1), + selector: original_spec.selector, + template, + volume_claim_templates: original_spec.volume_claim_templates, + update_strategy: original_spec.update_strategy, + min_ready_seconds: original_spec.min_ready_seconds, + persistent_volume_claim_retention_policy: original_spec + .persistent_volume_claim_retention_policy, + ordinals: original_spec.ordinals, + revision_history_limit: original_spec.revision_history_limit, + pod_management_policy: original_spec.pod_management_policy, + } + }); + + StatefulSet { + metadata: clean_metadata(statefulset.metadata), + spec: clean_spec, + status: None, + } +} + +fn clean_daemonset(daemonset: DaemonSet, workload_containers: &[KubeContainerInfo]) -> DaemonSet { + let clean_spec = daemonset.spec.map(|original_spec| { + let template = merge_template_containers(original_spec.template, workload_containers); + + DaemonSetSpec { + selector: original_spec.selector, + template, + update_strategy: original_spec.update_strategy, + min_ready_seconds: original_spec.min_ready_seconds, + revision_history_limit: original_spec.revision_history_limit, + } + }); + + DaemonSet { + metadata: clean_metadata(daemonset.metadata), + spec: clean_spec, + status: None, + } +} + +fn clean_replicaset( + replicaset: ReplicaSet, + workload_containers: &[KubeContainerInfo], +) -> ReplicaSet { + let clean_spec = 
replicaset.spec.map(|original_spec| { + let template = original_spec + .template + .map(|t| merge_template_containers(t, workload_containers)); + + ReplicaSetSpec { + replicas: Some(1), + selector: original_spec.selector, + template, + min_ready_seconds: original_spec.min_ready_seconds, + } + }); + + ReplicaSet { + metadata: clean_metadata(replicaset.metadata), + spec: clean_spec, + status: None, + } +} + +fn clean_pod(pod: Pod, workload_containers: &[KubeContainerInfo]) -> Pod { + let clean_spec = pod.spec.map(|original_spec| { + let merged_containers = merge_containers(original_spec.containers, workload_containers); + + PodSpec { + active_deadline_seconds: original_spec.active_deadline_seconds, + containers: merged_containers, + init_containers: original_spec.init_containers, + ephemeral_containers: original_spec.ephemeral_containers, + volumes: original_spec.volumes, + restart_policy: original_spec.restart_policy, + termination_grace_period_seconds: original_spec.termination_grace_period_seconds, + dns_policy: original_spec.dns_policy, + dns_config: original_spec.dns_config, + node_selector: original_spec.node_selector, + service_account_name: None, + service_account: None, + automount_service_account_token: None, + security_context: original_spec.security_context, + image_pull_secrets: original_spec.image_pull_secrets, + affinity: original_spec.affinity, + tolerations: original_spec.tolerations, + topology_spread_constraints: original_spec.topology_spread_constraints, + priority_class_name: original_spec.priority_class_name, + priority: original_spec.priority, + preemption_policy: original_spec.preemption_policy, + overhead: original_spec.overhead, + enable_service_links: original_spec.enable_service_links, + os: original_spec.os, + host_users: original_spec.host_users, + scheduling_gates: original_spec.scheduling_gates, + resource_claims: original_spec.resource_claims, + ..Default::default() + } + }); + + Pod { + metadata: clean_metadata(pod.metadata), + spec: 
clean_spec, + status: None, + } +} + +fn clean_job(job: Job, workload_containers: &[KubeContainerInfo]) -> Job { + let clean_spec = job.spec.map(|original_spec| { + let template = merge_template_containers(original_spec.template, workload_containers); + + JobSpec { + template, + parallelism: original_spec.parallelism, + completions: original_spec.completions, + completion_mode: original_spec.completion_mode, + active_deadline_seconds: original_spec.active_deadline_seconds, + backoff_limit: original_spec.backoff_limit, + backoff_limit_per_index: original_spec.backoff_limit_per_index, + max_failed_indexes: original_spec.max_failed_indexes, + selector: original_spec.selector, + manual_selector: original_spec.manual_selector, + ttl_seconds_after_finished: original_spec.ttl_seconds_after_finished, + suspend: original_spec.suspend, + pod_failure_policy: original_spec.pod_failure_policy, + pod_replacement_policy: original_spec.pod_replacement_policy, + managed_by: original_spec.managed_by, + success_policy: original_spec.success_policy, + } + }); + + Job { + metadata: clean_metadata(job.metadata), + spec: clean_spec, + status: None, + } +} + +fn clean_cronjob(cronjob: CronJob, workload_containers: &[KubeContainerInfo]) -> CronJob { + let clean_spec = cronjob.spec.map(|original_spec| { + let mut job_template = original_spec.job_template; + if let Some(job_spec) = &mut job_template.spec { + job_spec.template = + merge_template_containers(job_spec.template.clone(), workload_containers); + } + + CronJobSpec { + schedule: original_spec.schedule, + time_zone: original_spec.time_zone, + starting_deadline_seconds: original_spec.starting_deadline_seconds, + concurrency_policy: original_spec.concurrency_policy, + suspend: original_spec.suspend, + job_template, + successful_jobs_history_limit: original_spec.successful_jobs_history_limit, + failed_jobs_history_limit: original_spec.failed_jobs_history_limit, + } + }); + + CronJob { + metadata: clean_metadata(cronjob.metadata), + spec: 
clean_spec, + status: None, + } +} + +fn merge_template_containers( + template: PodTemplateSpec, + workload_containers: &[KubeContainerInfo], +) -> PodTemplateSpec { + let pod_spec = template.spec.map(|original_pod_spec| { + let mut new_spec = original_pod_spec; + let merged_containers = merge_containers(new_spec.containers, workload_containers); + new_spec.containers = merged_containers; + new_spec.service_account_name = None; + new_spec.service_account = None; + new_spec.automount_service_account_token = None; + new_spec + }); + + PodTemplateSpec { + spec: pod_spec, + ..template + } +} + +fn merge_containers( + containers: Vec, + workload_containers: &[KubeContainerInfo], +) -> Vec { + containers + .into_iter() + .map(|container| { + if let Some(workload_container) = workload_containers + .iter() + .find(|wc| wc.name == container.name) + { + merge_single_container(container, workload_container) + } else { + container + } + }) + .collect() +} + +fn merge_single_container( + container: Container, + workload_container: &KubeContainerInfo, +) -> Container { + let mut new_container = container.clone(); + + match &workload_container.image { + KubeContainerImage::FollowOriginal => { + if !workload_container.original_image.is_empty() { + new_container.image = Some(workload_container.original_image.clone()); + } + } + KubeContainerImage::Custom(custom_image) => { + if !custom_image.is_empty() { + new_container.image = Some(custom_image.clone()); + } + } + } + + let mut resources = container.resources.unwrap_or_default(); + let mut requests = resources.requests.unwrap_or_default(); + let mut limits = resources.limits.unwrap_or_default(); + + if let Some(cpu_request) = &workload_container.cpu_request { + if !cpu_request.is_empty() { + requests.insert("cpu".to_string(), Quantity(cpu_request.clone())); + } + } + if let Some(memory_request) = &workload_container.memory_request { + if !memory_request.is_empty() { + requests.insert("memory".to_string(), 
Quantity(memory_request.clone())); + } + } + + if let Some(cpu_limit) = &workload_container.cpu_limit { + if !cpu_limit.is_empty() { + limits.insert("cpu".to_string(), Quantity(cpu_limit.clone())); + } + } + if let Some(memory_limit) = &workload_container.memory_limit { + if !memory_limit.is_empty() { + limits.insert("memory".to_string(), Quantity(memory_limit.clone())); + } + } + + resources.requests = if requests.is_empty() { + None + } else { + Some(requests) + }; + resources.limits = if limits.is_empty() { + None + } else { + Some(limits) + }; + new_container.resources = Some(resources); + + let mut env_map: HashMap, Option)> = HashMap::new(); + + if !workload_container.original_env_vars.is_empty() { + for env_var in &workload_container.original_env_vars { + env_map.insert(env_var.name.clone(), (Some(env_var.value.clone()), None)); + } + } + + for kube_env_var in &workload_container.env_vars { + env_map.insert( + kube_env_var.name.clone(), + (Some(kube_env_var.value.clone()), None), + ); + } + + let merged_env: Vec = env_map + .into_iter() + .map(|(name, (value, value_from))| EnvVar { + name, + value, + value_from, + }) + .collect(); + + new_container.env = Some(merged_env); + + if !workload_container.ports.is_empty() { + let ports: Vec = workload_container + .ports + .iter() + .map(|port| ContainerPort { + name: port.name.clone(), + container_port: port.container_port, + protocol: port.protocol.clone(), + ..Default::default() + }) + .collect(); + new_container.ports = Some(ports); + } + + new_container +} + +fn ensure_sidecar_proxy_container( + pod_spec: &mut PodSpec, + environment_id: Uuid, + workload_id: Uuid, + namespace: &str, + auth_token: &str, + manager_namespace: Option<&str>, + proxy_routes_json: Option<&str>, +) -> usize { + let manager_namespace = manager_namespace.unwrap_or("lapdev"); + let manager_addr = format!("lapdev-kube-manager.{manager_namespace}.svc:5001"); + + if let Some(index) = pod_spec + .containers + .iter() + .position(|container| 
container.name == "lapdev-sidecar-proxy") + { + let container = &mut pod_spec.containers[index]; + ensure_sidecar_ports(container); + apply_sidecar_env( + container, + environment_id, + workload_id, + namespace, + auth_token, + &manager_addr, + ); + if let Some(json) = proxy_routes_json { + upsert_env_var( + container, + "LAPDEV_SIDECAR_PROXY_PORT_ROUTES", + json.to_string(), + ); + } + return index; + } + + let container = build_sidecar_proxy_container( + environment_id, + workload_id, + namespace, + auth_token, + &manager_addr, + proxy_routes_json.map(|value| value.to_string()), + ); + pod_spec.containers.push(container); + pod_spec.containers.len() - 1 +} + +fn build_sidecar_proxy_container( + environment_id: Uuid, + workload_id: Uuid, + namespace: &str, + auth_token: &str, + manager_addr: &str, + proxy_routes_json: Option, +) -> Container { + let sidecar_image = container_images::sidecar_proxy_image_reference(); + + let mut container = Container { + name: "lapdev-sidecar-proxy".to_string(), + image: Some(sidecar_image), + image_pull_policy: Some("Always".to_string()), + ..Default::default() + }; + + ensure_sidecar_ports(&mut container); + apply_sidecar_env( + &mut container, + environment_id, + workload_id, + namespace, + auth_token, + manager_addr, + ); + + if let Some(routes_json) = proxy_routes_json { + upsert_env_var( + &mut container, + "LAPDEV_SIDECAR_PROXY_PORT_ROUTES", + routes_json, + ); + } + + container +} + +fn ensure_sidecar_ports(container: &mut Container) { + let ports = container.ports.get_or_insert_with(Vec::new); + + if !ports + .iter() + .any(|port| port.container_port == DEFAULT_SIDECAR_PROXY_PORT as i32) + { + ports.push(ContainerPort { + container_port: DEFAULT_SIDECAR_PROXY_PORT as i32, + name: Some("proxy".to_string()), + protocol: Some("TCP".to_string()), + ..Default::default() + }); + } + + if !ports + .iter() + .any(|port| port.container_port == DEFAULT_SIDECAR_PROXY_METRICS_PORT as i32) + { + ports.push(ContainerPort { + 
container_port: DEFAULT_SIDECAR_PROXY_METRICS_PORT as i32, + name: Some("metrics".to_string()), + protocol: Some("TCP".to_string()), + ..Default::default() + }); + } +} + +fn apply_sidecar_env( + container: &mut Container, + environment_id: Uuid, + workload_id: Uuid, + namespace: &str, + auth_token: &str, + manager_addr: &str, +) { + upsert_env_var( + container, + "LAPDEV_ENVIRONMENT_ID", + environment_id.to_string(), + ); + upsert_env_var( + container, + "LAPDEV_ENVIRONMENT_AUTH_TOKEN", + auth_token.to_string(), + ); + upsert_env_var(container, "KUBERNETES_NAMESPACE", namespace.to_string()); + upsert_env_var( + container, + SIDECAR_PROXY_BIND_ADDR_ENV_VAR, + DEFAULT_SIDECAR_PROXY_BIND_ADDR.to_string(), + ); + upsert_env_var( + container, + SIDECAR_PROXY_PORT_ENV_VAR, + DEFAULT_SIDECAR_PROXY_PORT.to_string(), + ); + upsert_env_var( + container, + SIDECAR_PROXY_MANAGER_ADDR_ENV_VAR, + manager_addr.to_string(), + ); + upsert_env_var( + container, + SIDECAR_PROXY_WORKLOAD_ENV_VAR, + workload_id.to_string(), + ); + + ensure_hostname_env(container); +} + +fn ensure_hostname_env(container: &mut Container) { + let env_list = container.env.get_or_insert_with(Vec::new); + + if env_list.iter().any(|var| var.name == "HOSTNAME") { + return; + } + + env_list.push(EnvVar { + name: "HOSTNAME".to_string(), + value: None, + value_from: Some(EnvVarSource { + field_ref: Some(ObjectFieldSelector { + field_path: "metadata.name".to_string(), + ..Default::default() + }), + ..Default::default() + }), + ..Default::default() + }); +} + +fn upsert_env_var(container: &mut Container, name: &str, value: String) { + let env_list = container.env.get_or_insert_with(Vec::new); + + if let Some(existing) = env_list.iter_mut().find(|var| var.name == name) { + existing.value = Some(value); + existing.value_from = None; + } else { + env_list.push(EnvVar { + name: name.to_string(), + value: Some(value), + value_from: None, + ..Default::default() + }); + } +} + +pub fn set_workload_replicas(workload: &mut 
KubeWorkloadYamlOnly, replicas: i32) -> Result<()> { + match workload { + KubeWorkloadYamlOnly::Deployment(yaml) => { + let mut deployment: Deployment = serde_yaml::from_str(yaml)?; + if let Some(spec) = deployment.spec.as_mut() { + spec.replicas = Some(replicas); + } + *yaml = serde_yaml::to_string(&deployment)?; + } + KubeWorkloadYamlOnly::StatefulSet(yaml) => { + let mut statefulset: StatefulSet = serde_yaml::from_str(yaml)?; + if let Some(spec) = statefulset.spec.as_mut() { + spec.replicas = Some(replicas); + } + *yaml = serde_yaml::to_string(&statefulset)?; + } + KubeWorkloadYamlOnly::ReplicaSet(yaml) => { + let mut replicaset: ReplicaSet = serde_yaml::from_str(yaml)?; + if let Some(spec) = replicaset.spec.as_mut() { + spec.replicas = Some(replicas); + } + *yaml = serde_yaml::to_string(&replicaset)?; + } + _ => {} + } + Ok(()) +} + +pub fn set_cronjob_suspend(workload: &mut KubeWorkloadYamlOnly, suspend: bool) -> Result<()> { + if let KubeWorkloadYamlOnly::CronJob(yaml) = workload { + let mut cronjob: CronJob = serde_yaml::from_str(yaml)?; + if let Some(spec) = cronjob.spec.as_mut() { + spec.suspend = Some(suspend); + } + *yaml = serde_yaml::to_string(&cronjob)?; + } + + Ok(()) +} + +pub fn set_daemonset_paused(workload: &mut KubeWorkloadYamlOnly, paused: bool) -> Result<()> { + if let KubeWorkloadYamlOnly::DaemonSet(yaml) = workload { + let mut daemonset: DaemonSet = serde_yaml::from_str(yaml)?; + if let Some(spec) = daemonset.spec.as_mut() { + if let Some(template) = spec.template.spec.as_mut() { + let selector = template.node_selector.get_or_insert_with(Default::default); + + if paused { + selector.insert("lapdev.io/paused".to_string(), "true".to_string()); + } else if let Some(selector) = template.node_selector.as_mut() { + selector.remove("lapdev.io/paused"); + if selector.is_empty() { + template.node_selector = None; + } + } + } + } + *yaml = serde_yaml::to_string(&daemonset)?; + } + + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + use 
k8s_openapi::api::apps::v1::Deployment as K8sDeployment; + use lapdev_common::kube::KubeEnvVar; + use serde_yaml::Value; + + fn base_container(name: &str) -> KubeContainerInfo { + KubeContainerInfo { + name: name.to_string(), + original_image: "example:image".to_string(), + image: KubeContainerImage::FollowOriginal, + cpu_request: None, + cpu_limit: None, + memory_request: None, + memory_limit: None, + env_vars: Vec::new(), + original_env_vars: Vec::new(), + ports: Vec::new(), + } + } + + fn assert_replica_clamped(kind: KubeWorkloadKind, yaml: &str) { + let containers = vec![base_container("app")]; + let rebuilt = rebuild_workload_yaml(&kind, yaml, &containers, None).unwrap(); + let parsed: Value = serde_yaml::from_str(&rebuilt).unwrap(); + let replicas = parsed + .get("spec") + .and_then(|spec| spec.get("replicas")) + .and_then(|value| value.as_i64()); + assert_eq!( + replicas, + Some(1), + "replica count not clamped for {:?}", + kind + ); + } + + #[test] + fn rebuild_workload_yaml_clamps_replicas_for_supported_kinds() { + let deployment = r#" +apiVersion: apps/v1 +kind: Deployment +metadata: + name: test-deploy +spec: + replicas: 3 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: app + image: nginx +"#; + assert_replica_clamped(KubeWorkloadKind::Deployment, deployment); + + let statefulset = r#" +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: test-sts +spec: + serviceName: svc + replicas: 4 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: app + image: nginx +"#; + assert_replica_clamped(KubeWorkloadKind::StatefulSet, statefulset); + + let replicaset = r#" +apiVersion: apps/v1 +kind: ReplicaSet +metadata: + name: test-rs +spec: + replicas: 5 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: app + image: nginx +"#; + 
assert_replica_clamped(KubeWorkloadKind::ReplicaSet, replicaset); + } + + #[test] + fn rebuild_workload_yaml_restores_follow_original_image_and_env() { + let yaml = r#" +apiVersion: apps/v1 +kind: Deployment +metadata: + name: app +spec: + replicas: 3 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: app + image: custom/image:2 + env: + - name: CUSTOMIZED + value: stale +"#; + + let mut container = base_container("app"); + container.original_image = "registry.example.com/app:v1".to_string(); + container.original_env_vars = vec![KubeEnvVar { + name: "BASE".to_string(), + value: "original".to_string(), + }]; + container.env_vars.clear(); + + let rebuilt = + rebuild_workload_yaml(&KubeWorkloadKind::Deployment, yaml, &[container], None) + .expect("rebuild should succeed"); + + let deployment: K8sDeployment = serde_yaml::from_str(&rebuilt).unwrap(); + let pod_spec = deployment + .spec + .as_ref() + .and_then(|spec| spec.template.spec.as_ref()) + .expect("pod spec should exist"); + let container_spec = pod_spec.containers.first().expect("container should exist"); + + assert_eq!( + container_spec.image.as_deref(), + Some("registry.example.com/app:v1"), + "reset should restore original image" + ); + + let env = container_spec + .env + .as_ref() + .expect("env vars should be restored"); + assert!( + env.iter() + .any(|var| var.name == "BASE" && var.value.as_deref() == Some("original")), + "original env vars should be present: {:?}", + env + ); + assert!( + env.iter().all(|var| var.name != "CUSTOMIZED"), + "stale env vars should be removed: {:?}", + env + ); + } +} diff --git a/crates/api/src/kube_controller/service.rs b/crates/api/src/kube_controller/service.rs new file mode 100644 index 0000000..dab2319 --- /dev/null +++ b/crates/api/src/kube_controller/service.rs @@ -0,0 +1,69 @@ +use uuid::Uuid; + +use lapdev_rpc::error::ApiError; + +use super::KubeController; + +impl KubeController { + pub async fn 
get_environment_services( + &self, + org_id: Uuid, + user_id: Uuid, + environment_id: Uuid, + ) -> Result, ApiError> { + // Verify environment belongs to the organization + let environment = self + .db + .get_kube_environment(environment_id) + .await + .map_err(ApiError::from)? + .ok_or_else(|| ApiError::InvalidRequest("Environment not found".to_string()))?; + + // Check authorization + if environment.organization_id != org_id { + return Err(ApiError::Unauthorized); + } + + // If it's a personal environment, check ownership + if !environment.is_shared && environment.user_id != user_id { + return Err(ApiError::Unauthorized); + } + + if let Some(base_environment_id) = environment.base_environment_id { + // Branch environments inherit service definitions from their base environment. + // We return the base services but rewrite the environment_id so downstream callers + // keep treating them as belonging to the branch (important for preview URLs). + let base_environment = self + .db + .get_kube_environment(base_environment_id) + .await + .map_err(ApiError::from)? 
+ .ok_or_else(|| { + ApiError::InvalidRequest("Base environment not found".to_string()) + })?; + + if base_environment.organization_id != org_id { + return Err(ApiError::Unauthorized); + } + + let mut base_services = self + .db + .get_environment_services(base_environment_id) + .await + .map_err(ApiError::from)?; + for service in &mut base_services { + service.environment_id = environment.id; + } + + return Ok(base_services); + } + + let services = self + .db + .get_environment_services(environment_id) + .await + .map_err(ApiError::from)?; + + Ok(services) + } +} diff --git a/crates/api/src/kube_controller/validation.rs b/crates/api/src/kube_controller/validation.rs new file mode 100644 index 0000000..695a6fc --- /dev/null +++ b/crates/api/src/kube_controller/validation.rs @@ -0,0 +1,216 @@ +use lapdev_common::kube::{KubeContainerImage, KubeContainerInfo}; +use lapdev_rpc::error::ApiError; + +pub fn validate_containers(containers: &[KubeContainerInfo]) -> Result<(), ApiError> { + if containers.is_empty() { + return Err(ApiError::InvalidRequest( + "At least one container is required".to_string(), + )); + } + + for (index, container) in containers.iter().enumerate() { + if container.name.trim().is_empty() { + return Err(ApiError::InvalidRequest(format!( + "Container {} name cannot be empty", + index + 1 + ))); + } + + match &container.image { + KubeContainerImage::Custom(image) => { + if image.trim().is_empty() { + return Err(ApiError::InvalidRequest(format!( + "Container '{}' custom image cannot be empty", + container.name + ))); + } + } + KubeContainerImage::FollowOriginal => { + // No validation needed for FollowOriginal + } + } + + // Validate CPU resources + if let Some(cpu_request) = &container.cpu_request { + if !is_valid_cpu_quantity(cpu_request) { + return Err(ApiError::InvalidRequest(format!("Container '{}' has invalid CPU request format. Use formats like '100m', '0.1', '1'. 
Minimum precision is 1m (0.001 CPU)", container.name))); + } + } + + if let Some(cpu_limit) = &container.cpu_limit { + if !is_valid_cpu_quantity(cpu_limit) { + return Err(ApiError::InvalidRequest(format!("Container '{}' has invalid CPU limit format. Use formats like '100m', '0.1', '1'. Minimum precision is 1m (0.001 CPU)", container.name))); + } + } + + // Validate memory resources + if let Some(memory_request) = &container.memory_request { + if !is_valid_memory_quantity(memory_request) { + return Err(ApiError::InvalidRequest(format!("Container '{}' has invalid memory request format. Use formats like '128Mi', '1Gi', '512M'. Maximum 3 decimal places allowed", container.name))); + } + } + + if let Some(memory_limit) = &container.memory_limit { + if !is_valid_memory_quantity(memory_limit) { + return Err(ApiError::InvalidRequest(format!("Container '{}' has invalid memory limit format. Use formats like '128Mi', '1Gi', '512M'. Maximum 3 decimal places allowed", container.name))); + } + } + + // Validate environment variables + for env_var in &container.env_vars { + if env_var.name.trim().is_empty() { + return Err(ApiError::InvalidRequest(format!( + "Container '{}' has environment variable with empty name", + container.name + ))); + } + } + } + + Ok(()) +} + +pub fn is_valid_cpu_quantity(quantity: &str) -> bool { + // CPU validation for Kubernetes + // Supports formats like: 100m, 0.1, 1, 2.5, etc. 
+ // Kubernetes minimum precision is 1m (0.001 CPU) + if quantity.trim().is_empty() { + return false; // Empty values are invalid - must specify a resource amount + } + + let quantity = quantity.trim(); + + // CPU can have 'm' suffix for millicores or be a plain decimal number + let re = match regex::Regex::new(r"^(\d+\.?\d*|\.\d+)m?$") { + Ok(regex) => regex, + Err(_) => return false, + }; + + if !re.is_match(quantity) { + return false; + } + + // Parse the numeric part and validate precision constraints + let (numeric_part, has_millicore_suffix) = if quantity.ends_with('m') { + (&quantity[..quantity.len() - 1], true) + } else { + (quantity, false) + }; + + if let Ok(value) = numeric_part.parse::() { + if value <= 0.0 { + return false; // Must be positive + } + + if has_millicore_suffix { + // For millicores (m suffix), minimum is 1m, so value must be >= 1.0 + value >= 1.0 + } else { + // For plain decimal CPU values, minimum precision is 0.001 (1m equivalent) + value >= 0.001 + } + } else { + false + } +} + +pub fn is_valid_memory_quantity(quantity: &str) -> bool { + // Memory validation for Kubernetes + // Supports formats like: 128Mi, 1Gi, 512M, 1000000000, etc. 
+ // Maximum precision is 3 decimal places + if quantity.trim().is_empty() { + return false; // Empty values are invalid - must specify a resource amount + } + + let quantity = quantity.trim(); + + // Memory can have binary (Ki, Mi, Gi, Ti, Pi, Ei) or decimal (K, M, G, T, P, E) suffixes + let valid_suffixes = [ + "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "K", "M", "G", "T", "P", "E", + ]; + + let (numeric_part, _suffix) = + if let Some(suffix) = valid_suffixes.iter().find(|&s| quantity.ends_with(s)) { + (&quantity[..quantity.len() - suffix.len()], Some(*suffix)) + } else { + // No suffix means it's in bytes + (quantity, None) + }; + + // Validate the numeric part is a positive number with max 3 decimal places + if let Ok(value) = numeric_part.parse::() { + if value <= 0.0 { + return false; // Must be positive + } + + // Check decimal places constraint (max 3 decimal places) + if let Some(decimal_pos) = numeric_part.find('.') { + let decimal_part = &numeric_part[decimal_pos + 1..]; + if decimal_part.len() > 3 { + return false; // More than 3 decimal places + } + } + + true + } else { + false + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_cpu_validation() { + // Valid CPU values + assert!(is_valid_cpu_quantity("100m")); // 100 millicores + assert!(is_valid_cpu_quantity("1m")); // 1 millicore (minimum) + assert!(is_valid_cpu_quantity("0.1")); // 0.1 CPU (100m equivalent) + assert!(is_valid_cpu_quantity("1")); // 1 CPU + assert!(is_valid_cpu_quantity("2.5")); // 2.5 CPUs + assert!(is_valid_cpu_quantity("500m")); // 500 millicores + assert!(is_valid_cpu_quantity("1.5m")); // 1.5 millicores + assert!(is_valid_cpu_quantity("0.001")); // Minimum decimal precision + + // Invalid CPU values + assert!(!is_valid_cpu_quantity("")); // Empty + assert!(!is_valid_cpu_quantity(" ")); // Whitespace + assert!(!is_valid_cpu_quantity("0.5m")); // Below 1m minimum + assert!(!is_valid_cpu_quantity("0.0005")); // Below 0.001 minimum + 
assert!(!is_valid_cpu_quantity("0m")); // Zero millicores + assert!(!is_valid_cpu_quantity("0")); // Zero CPU + assert!(!is_valid_cpu_quantity("100Mi")); // Wrong suffix + assert!(!is_valid_cpu_quantity("-100m")); // Negative + assert!(!is_valid_cpu_quantity("abc")); // Non-numeric + assert!(!is_valid_cpu_quantity("100x")); // Invalid suffix + } + + #[test] + fn test_memory_validation() { + // Valid memory values + assert!(is_valid_memory_quantity("128Mi")); + assert!(is_valid_memory_quantity("1Gi")); + assert!(is_valid_memory_quantity("512M")); + assert!(is_valid_memory_quantity("1000000000")); // Raw bytes + assert!(is_valid_memory_quantity("2Ti")); + assert!(is_valid_memory_quantity("1.5")); // 1 decimal place + assert!(is_valid_memory_quantity("1.5Gi")); // 1 decimal place + assert!(is_valid_memory_quantity("128.25Mi")); // 2 decimal places + assert!(is_valid_memory_quantity("1.125Gi")); // 3 decimal places (max) + assert!(is_valid_memory_quantity("0.5Gi")); // Decimal with suffix + + // Invalid memory values + assert!(!is_valid_memory_quantity("")); // Empty + assert!(!is_valid_memory_quantity(" ")); // Whitespace + assert!(!is_valid_memory_quantity("100m")); // CPU suffix on memory + assert!(!is_valid_memory_quantity("-128Mi")); // Negative + assert!(!is_valid_memory_quantity("abc")); // Non-numeric + assert!(!is_valid_memory_quantity("100x")); // Invalid suffix + assert!(!is_valid_memory_quantity("0Mi")); // Zero + assert!(!is_valid_memory_quantity("1.1234Gi")); // 4 decimal places (too many) + assert!(!is_valid_memory_quantity("1.1234")); // 4 decimal places (too many) + assert!(!is_valid_memory_quantity("128.12345Mi")); // 5 decimal places (too many) + assert!(!is_valid_memory_quantity("128.12345")); // 5 decimal places (too many) + } +} diff --git a/crates/api/src/kube_controller/workload.rs b/crates/api/src/kube_controller/workload.rs new file mode 100644 index 0000000..dde6440 --- /dev/null +++ b/crates/api/src/kube_controller/workload.rs @@ -0,0 +1,974 
@@ +use uuid::Uuid; + +use std::{ + collections::{BTreeMap, HashMap, HashSet}, + str::FromStr, + time::{Duration, Instant}, +}; + +use k8s_openapi::api::{ + apps::v1::{DaemonSet, Deployment, ReplicaSet, StatefulSet}, + batch::v1::{CronJob, Job}, + core::v1::{Pod, Service}, +}; +use k8s_openapi::apimachinery::pkg::apis::meta::v1::{LabelSelector, ObjectMeta}; +use lapdev_common::kube::{ + KubeEnvironment, KubeEnvironmentService, KubeEnvironmentWorkload, + KubeEnvironmentWorkloadDetail, KubeServiceDetails, KubeServiceWithYaml, KubeWorkloadKind, +}; +use lapdev_kube::server::KubeClusterServer; +use lapdev_kube_rpc::{ + KubeWorkloadWithResources, KubeWorkloadYamlOnly, KubeWorkloadsWithResources, + NamespacedResourceKind, ProxyBranchRouteConfig, +}; +use lapdev_rpc::error::ApiError; + +use super::{resources::rebuild_workload_yaml, KubeController}; + +impl KubeController { + pub async fn get_environment_workloads( + &self, + org_id: Uuid, + user_id: Uuid, + environment_id: Uuid, + ) -> Result, ApiError> { + // Verify environment belongs to the organization + let environment = self + .db + .get_kube_environment(environment_id) + .await + .map_err(ApiError::from)? 
+ .ok_or_else(|| ApiError::InvalidRequest("Environment not found".to_string()))?; + + // Check authorization + if environment.organization_id != org_id { + return Err(ApiError::Unauthorized); + } + + // If it's a personal environment, check ownership + if !environment.is_shared && environment.user_id != user_id { + return Err(ApiError::Unauthorized); + } + + let mut workloads = self + .db + .get_environment_workloads(environment_id) + .await + .map_err(ApiError::from)?; + + if let Some(base_environment_id) = environment.base_environment_id { + let base_workloads = self + .db + .get_environment_workloads(base_environment_id) + .await + .map_err(ApiError::from)?; + let overridden: HashSet = workloads + .iter() + .filter_map(|w| w.base_workload_id) + .collect(); + + for base_workload in base_workloads { + if overridden.contains(&base_workload.id) { + continue; + } + let mut inherited = base_workload.clone(); + inherited.environment_id = environment.id; + if inherited.base_workload_id.is_none() { + inherited.base_workload_id = Some(base_workload.id); + } + workloads.push(inherited); + } + } + + Ok(workloads) + } + + pub async fn get_environment_workload_detail( + &self, + org_id: Uuid, + user_id: Uuid, + environment_id: Uuid, + workload_id: Uuid, + ) -> Result { + let environment = self + .get_kube_environment(org_id, user_id, environment_id) + .await?; + + let workload = self + .db + .get_environment_workload(workload_id) + .await + .map_err(ApiError::from)? 
+ .ok_or_else(|| ApiError::InvalidRequest("Workload not found".to_string()))?; + + let workload = Self::map_workload_to_environment(&environment, workload)?; + + Ok(KubeEnvironmentWorkloadDetail { + environment, + workload, + }) + } + + pub async fn get_environment_workload( + &self, + org_id: Uuid, + user_id: Uuid, + workload_id: Uuid, + ) -> Result, ApiError> { + // First get the workload to find its environment + if let Some(workload) = self + .db + .get_environment_workload(workload_id) + .await + .map_err(ApiError::from)? + { + // Verify the environment belongs to the organization + let environment = self + .db + .get_kube_environment(workload.environment_id) + .await + .map_err(ApiError::from)? + .ok_or_else(|| ApiError::InvalidRequest("Environment not found".to_string()))?; + if environment.organization_id != org_id { + return Err(ApiError::Unauthorized); + } + if !environment.is_shared && environment.user_id != user_id { + return Err(ApiError::Unauthorized); + } + Ok(Some(workload)) + } else { + Ok(None) + } + } + + pub async fn delete_environment_workload( + &self, + org_id: Uuid, + user_id: Uuid, + environment_id: Uuid, + workload_id: Uuid, + ) -> Result<(), ApiError> { + let environment = self + .db + .get_kube_environment(environment_id) + .await + .map_err(ApiError::from)? + .ok_or_else(|| ApiError::InvalidRequest("Environment not found".to_string()))?; + + if environment.organization_id != org_id { + return Err(ApiError::Unauthorized); + } + + if !environment.is_shared && environment.user_id != user_id { + return Err(ApiError::Unauthorized); + } + + let workload = self + .db + .get_environment_workload(workload_id) + .await + .map_err(ApiError::from)? 
+ .ok_or_else(|| ApiError::InvalidRequest("Workload not found".to_string()))?; + + if workload.environment_id != environment.id { + return Err(ApiError::InvalidRequest( + "Workload does not belong to this environment".to_string(), + )); + } + + if environment.base_environment_id.is_some() { + let cluster_server = self + .get_random_kube_cluster_server(environment.cluster_id) + .await + .ok_or_else(|| { + ApiError::InvalidRequest( + "No connected KubeManager for this cluster; cannot delete branch workload" + .to_string(), + ) + })?; + + self.delete_branch_workload_resources(&cluster_server, &environment, &workload) + .await?; + + if let Some((base_env_id, base_workload_id)) = environment + .base_environment_id + .zip(workload.base_workload_id) + { + Self::send_branch_service_route_removal( + &cluster_server, + base_env_id, + base_workload_id, + environment.id, + ) + .await; + } + } + + self.db + .delete_environment_workload(workload_id) + .await + .map_err(ApiError::from)?; + + Ok(()) + } + + pub async fn update_environment_workload( + &self, + _org_id: Uuid, + _user_id: Uuid, + workload_id: Uuid, + containers: Vec, + environment: lapdev_db_entities::kube_environment::Model, + ) -> Result { + let existing_workload = self + .db + .get_environment_workload(workload_id) + .await + .map_err(ApiError::from)? 
+ .ok_or_else(|| ApiError::InvalidRequest("Workload not found".to_string()))?; + + let mut workload_record = existing_workload; + if let Some(base_environment_id) = environment.base_environment_id { + if workload_record.environment_id == base_environment_id { + workload_record = self + .ensure_branch_workload_override(&environment, &workload_record) + .await?; + } + } + + let kind = KubeWorkloadKind::from_str(&workload_record.kind).map_err(|_| { + ApiError::InvalidRequest(format!( + "Invalid workload kind {} for environment {}", + workload_record.kind, workload_record.environment_id + )) + })?; + + let target_workload_id = workload_record.id; + + let (workload_with_resources, persisted_yaml, extra_labels) = + if environment.base_environment_id.is_some() { + let (manifest, yaml, labels) = self + .build_branch_workload_manifest( + &environment, + &workload_record, + kind.clone(), + &containers, + ) + .await?; + (manifest, yaml, Some(labels)) + } else { + let (manifest, yaml) = Self::build_standard_workload_manifest( + kind, + &workload_record.workload_yaml, + &containers, + )?; + let mut labels = HashMap::new(); + labels.insert( + "lapdev.base-workload".to_string(), + workload_record.name.clone(), + ); + labels.insert( + "lapdev.base-workload-id".to_string(), + workload_record.id.to_string(), + ); + (manifest, yaml, Some(labels)) + }; + + let cluster_server = self + .get_random_kube_cluster_server(environment.cluster_id) + .await + .ok_or_else(|| { + ApiError::InvalidRequest("No connected KubeManager for this cluster".to_string()) + })?; + + self.deploy_environment_resources( + &cluster_server, + &environment, + KubeWorkloadsWithResources { + workloads: vec![workload_with_resources.workload], + services: workload_with_resources.services, + configmaps: workload_with_resources.configmaps, + secrets: workload_with_resources.secrets, + }, + extra_labels, + ) + .await?; + + if let Some(base_environment_id) = environment.base_environment_id { + match 
workload_record.base_workload_id { + Some(base_workload_id) => { + let base_environment = self + .db + .get_kube_environment(base_environment_id) + .await + .map_err(ApiError::from)? + .ok_or_else(|| { + ApiError::InvalidRequest(format!( + "Base environment {} not found", + base_environment_id + )) + })?; + + match cluster_server + .build_branch_service_route_config( + &base_environment, + base_workload_id, + environment.id, + ) + .await + { + Ok(Some(route)) => { + Self::send_branch_service_route_update( + &cluster_server, + base_environment_id, + base_workload_id, + environment.id, + route, + ) + .await; + } + Ok(None) => { + Self::send_branch_service_route_removal( + &cluster_server, + base_environment_id, + base_workload_id, + environment.id, + ) + .await; + } + Err(err) => { + tracing::warn!( + base_environment_id = %base_environment_id, + branch_environment_id = %environment.id, + workload_id = %base_workload_id, + error = ?err, + "Failed to build branch service route config; falling back to full refresh" + ); + Self::refresh_branch_service_routes_with_logging( + &cluster_server, + base_environment_id, + ) + .await; + } + } + } + None => { + tracing::warn!( + environment_id = %environment.id, + workload_id = %workload_id, + "Branch workload missing base_workload_id; falling back to full refresh" + ); + Self::refresh_branch_service_routes_with_logging( + &cluster_server, + base_environment_id, + ) + .await; + } + } + } + + self.db + .update_environment_workload(target_workload_id, containers, persisted_yaml) + .await + .map_err(ApiError::from)?; + + Ok(target_workload_id) + } + + async fn delete_branch_workload_resources( + &self, + cluster_server: &KubeClusterServer, + environment: &lapdev_db_entities::kube_environment::Model, + workload: &lapdev_common::kube::KubeEnvironmentWorkload, + ) -> Result<(), ApiError> { + let kind = KubeWorkloadKind::from_str(&workload.kind).map_err(|_| { + ApiError::InvalidRequest(format!( + "Invalid workload kind {} in branch 
environment", + workload.kind + )) + })?; + + let mut resources = vec![(workload_kind_to_resource_kind(&kind), workload.name.clone())]; + + let services = self + .db + .get_environment_services(environment.id) + .await + .map_err(ApiError::from)?; + + for service in services { + if service_targets_branch_workload(&service, &workload.name) { + resources.push((NamespacedResourceKind::Service, service.name.clone())); + } + } + + let mut ctx = Self::kube_manager_rpc_context(); + + match cluster_server + .rpc_client + .delete_namespaced_resources(ctx, environment.namespace.clone(), resources) + .await + { + Ok(Ok(())) => Ok(()), + Ok(Err(err)) => Err(ApiError::InvalidRequest(format!( + "Failed to delete branch workload resources: {err}" + ))), + Err(err) => Err(ApiError::InvalidRequest(format!( + "Connection error while deleting branch workload resources: {err}" + ))), + } + } + + pub(super) async fn send_branch_service_route_update( + cluster_server: &KubeClusterServer, + base_environment_id: Uuid, + base_workload_id: Uuid, + branch_environment_id: Uuid, + route: ProxyBranchRouteConfig, + ) { + match cluster_server + .rpc_client + .update_branch_service_route( + Self::kube_manager_rpc_context(), + base_environment_id, + base_workload_id, + route, + ) + .await + { + Ok(Ok(())) => {} + Ok(Err(err)) => { + tracing::warn!( + base_environment_id = %base_environment_id, + branch_environment_id = %branch_environment_id, + workload_id = %base_workload_id, + error = %err, + "KubeManager rejected branch service route update" + ); + } + Err(err) => { + tracing::warn!( + base_environment_id = %base_environment_id, + branch_environment_id = %branch_environment_id, + workload_id = %base_workload_id, + error = %err, + "Failed to send branch service route update to KubeManager" + ); + } + } + } + + pub(super) async fn send_branch_service_route_removal( + cluster_server: &KubeClusterServer, + base_environment_id: Uuid, + base_workload_id: Uuid, + branch_environment_id: Uuid, + ) { + 
match cluster_server + .rpc_client + .remove_branch_service_route( + Self::kube_manager_rpc_context(), + base_environment_id, + base_workload_id, + branch_environment_id, + ) + .await + { + Ok(Ok(())) => {} + Ok(Err(err)) => { + tracing::warn!( + base_environment_id = %base_environment_id, + branch_environment_id = %branch_environment_id, + workload_id = %base_workload_id, + error = %err, + "KubeManager rejected branch service route removal" + ); + } + Err(err) => { + tracing::warn!( + base_environment_id = %base_environment_id, + branch_environment_id = %branch_environment_id, + workload_id = %base_workload_id, + error = %err, + "Failed to send branch service route removal to KubeManager" + ); + } + } + } + + pub(super) async fn refresh_branch_service_routes_with_logging( + cluster_server: &KubeClusterServer, + base_environment_id: Uuid, + ) { + if let Err(err) = cluster_server + .rpc_client + .refresh_branch_service_routes(Self::kube_manager_rpc_context(), base_environment_id) + .await + { + tracing::warn!( + base_environment_id = %base_environment_id, + error = %err, + "Failed to refresh branch service routes during fallback" + ); + } + } + + pub(super) async fn build_branch_workload_manifest( + &self, + environment: &lapdev_db_entities::kube_environment::Model, + existing_workload: &lapdev_common::kube::KubeEnvironmentWorkload, + kind: KubeWorkloadKind, + containers: &[lapdev_common::kube::KubeContainerInfo], + ) -> Result<(KubeWorkloadWithResources, String, HashMap), ApiError> { + let base_workload_name = existing_workload.name.clone(); + let env_suffix = environment.id.to_string(); + let branch_workload_name = if base_workload_name.ends_with(&env_suffix) { + base_workload_name.clone() + } else { + format!("{}-{}", base_workload_name, environment.id) + }; + let branch_selector = build_branch_service_selector(&branch_workload_name); + + let (mut workload_with_resources, persisted_yaml) = Self::build_standard_workload_manifest( + kind, + 
&existing_workload.workload_yaml, + containers, + )?; + + let environment_services = self + .db + .get_environment_services(environment.id) + .await + .map_err(ApiError::from)?; + + let mut updated_services: HashMap = HashMap::new(); + for service in environment_services { + let mut matches_workload = + service.name == base_workload_name || service.name == branch_workload_name; + if !matches_workload { + matches_workload = service + .selector + .values() + .any(|value| value == &base_workload_name || value == &branch_workload_name); + } + if !matches_workload { + continue; + } + + if service.yaml.trim().is_empty() { + tracing::warn!( + environment_id = %environment.id, + workload = %existing_workload.name, + service = %service.name, + "Branch workload service YAML is empty; skipping service deployment" + ); + continue; + } + + let branch_service_name = format!("{}-{}", service.name, environment.id); + updated_services.insert( + branch_service_name.clone(), + KubeServiceWithYaml { + yaml: service.yaml.clone(), + details: KubeServiceDetails { + name: branch_service_name, + ports: service.ports.clone(), + selector: branch_selector.clone(), + }, + }, + ); + } + workload_with_resources.services = updated_services; + workload_with_resources.configmaps.clear(); + workload_with_resources.secrets.clear(); + + let mut extra_labels = HashMap::new(); + extra_labels.insert( + "lapdev.branch-environment".to_string(), + environment.id.to_string(), + ); + extra_labels.insert( + "lapdev.base-workload".to_string(), + existing_workload.name.clone(), + ); + extra_labels.insert( + "lapdev.io/routing-key".to_string(), + format!("branch-{}", environment.id), + ); + extra_labels.insert( + "lapdev.io/proxy-target-port".to_string(), + "8080".to_string(), + ); + extra_labels.insert( + "lapdev.base-workload-id".to_string(), + existing_workload.id.to_string(), + ); + + Ok((workload_with_resources, persisted_yaml, extra_labels)) + } + + fn build_standard_workload_manifest( + kind: 
KubeWorkloadKind, + original_yaml: &str, + containers: &[lapdev_common::kube::KubeContainerInfo], + ) -> Result<(KubeWorkloadWithResources, String), ApiError> { + let rebuilt_yaml = + rebuild_workload_yaml(&kind, original_yaml, containers, None).map_err(|err| { + ApiError::InvalidRequest(format!("Failed to rebuild workload manifest: {}", err)) + })?; + + let workload = match kind { + KubeWorkloadKind::Deployment => KubeWorkloadYamlOnly::Deployment(rebuilt_yaml.clone()), + KubeWorkloadKind::StatefulSet => { + KubeWorkloadYamlOnly::StatefulSet(rebuilt_yaml.clone()) + } + KubeWorkloadKind::DaemonSet => KubeWorkloadYamlOnly::DaemonSet(rebuilt_yaml.clone()), + KubeWorkloadKind::ReplicaSet => KubeWorkloadYamlOnly::ReplicaSet(rebuilt_yaml.clone()), + KubeWorkloadKind::Pod => KubeWorkloadYamlOnly::Pod(rebuilt_yaml.clone()), + KubeWorkloadKind::Job => KubeWorkloadYamlOnly::Job(rebuilt_yaml.clone()), + KubeWorkloadKind::CronJob => KubeWorkloadYamlOnly::CronJob(rebuilt_yaml.clone()), + }; + + Ok(( + KubeWorkloadWithResources { + workload, + services: HashMap::new(), + configmaps: HashMap::new(), + secrets: HashMap::new(), + }, + rebuilt_yaml, + )) + } + + fn map_workload_to_environment( + environment: &KubeEnvironment, + mut workload: KubeEnvironmentWorkload, + ) -> Result { + if workload.environment_id == environment.id { + return Ok(workload); + } + + if let Some(base_environment_id) = environment.base_environment_id { + if workload.environment_id == base_environment_id { + if workload.base_workload_id.is_none() { + workload.base_workload_id = Some(workload.id); + } + workload.environment_id = environment.id; + return Ok(workload); + } + } + + Err(ApiError::InvalidRequest( + "Workload does not belong to this environment".to_string(), + )) + } +} + +pub(super) fn rename_workload_yaml( + workload: &mut KubeWorkloadYamlOnly, + new_name: &str, + selector_labels: &BTreeMap, +) -> Result<(), ApiError> { + match workload { + KubeWorkloadYamlOnly::Deployment(yaml) => { + 
rename_deployment_yaml(yaml, new_name, selector_labels) + } + KubeWorkloadYamlOnly::StatefulSet(yaml) => { + rename_statefulset_yaml(yaml, new_name, selector_labels) + } + KubeWorkloadYamlOnly::DaemonSet(yaml) => { + rename_daemonset_yaml(yaml, new_name, selector_labels) + } + KubeWorkloadYamlOnly::ReplicaSet(yaml) => { + rename_replicaset_yaml(yaml, new_name, selector_labels) + } + KubeWorkloadYamlOnly::Pod(yaml) => rename_pod_yaml(yaml, new_name, selector_labels), + KubeWorkloadYamlOnly::Job(yaml) => rename_job_yaml(yaml, new_name, selector_labels), + KubeWorkloadYamlOnly::CronJob(yaml) => rename_cronjob_yaml(yaml, new_name, selector_labels), + } +} + +pub(super) fn build_branch_service_selector(workload_name: &str) -> BTreeMap { + let mut selector = BTreeMap::new(); + selector.insert("app".to_string(), workload_name.to_string()); + selector +} + +fn service_targets_branch_workload( + service: &KubeEnvironmentService, + branch_workload_name: &str, +) -> bool { + if service.name == branch_workload_name { + return true; + } + service + .selector + .values() + .any(|value| value == branch_workload_name) +} + +fn workload_kind_to_resource_kind(kind: &KubeWorkloadKind) -> NamespacedResourceKind { + match kind { + KubeWorkloadKind::Deployment => NamespacedResourceKind::Deployment, + KubeWorkloadKind::StatefulSet => NamespacedResourceKind::StatefulSet, + KubeWorkloadKind::DaemonSet => NamespacedResourceKind::DaemonSet, + KubeWorkloadKind::ReplicaSet => NamespacedResourceKind::ReplicaSet, + KubeWorkloadKind::Pod => NamespacedResourceKind::Pod, + KubeWorkloadKind::Job => NamespacedResourceKind::Job, + KubeWorkloadKind::CronJob => NamespacedResourceKind::CronJob, + } +} + +pub(super) fn rename_service_yaml( + yaml: &str, + new_service_name: &str, + selector_labels: &BTreeMap, +) -> Result { + let mut service: Service = serde_yaml::from_str(yaml).map_err(|err| { + ApiError::InvalidRequest(format!("Failed to parse service YAML for rename: {}", err)) + })?; + + 
service.metadata.name = Some(new_service_name.to_string()); + let mut labels = BTreeMap::new(); + labels.insert("app".to_string(), new_service_name.to_string()); + service.metadata.labels = Some(labels); + + if let Some(spec) = service.spec.as_mut() { + spec.selector = Some(selector_labels.clone()); + } + + serde_yaml::to_string(&service).map_err(|err| { + ApiError::InvalidRequest(format!("Failed to serialize renamed service YAML: {}", err)) + }) +} + +fn rename_deployment_yaml( + yaml: &mut String, + new_name: &str, + selector_labels: &BTreeMap, +) -> Result<(), ApiError> { + let mut deployment: Deployment = serde_yaml::from_str(yaml).map_err(|err| { + ApiError::InvalidRequest(format!( + "Failed to parse deployment YAML for rename: {}", + err + )) + })?; + + update_object_meta(&mut deployment.metadata, new_name, selector_labels); + + if let Some(spec) = deployment.spec.as_mut() { + update_selector_labels(&mut spec.selector, selector_labels); + if let Some(template) = spec.template.metadata.as_mut() { + update_object_meta(template, new_name, selector_labels); + } + } + + *yaml = serde_yaml::to_string(&deployment).map_err(|err| { + ApiError::InvalidRequest(format!( + "Failed to serialize renamed deployment YAML: {}", + err + )) + })?; + Ok(()) +} + +fn rename_statefulset_yaml( + yaml: &mut String, + new_name: &str, + selector_labels: &BTreeMap, +) -> Result<(), ApiError> { + let mut statefulset: StatefulSet = serde_yaml::from_str(yaml).map_err(|err| { + ApiError::InvalidRequest(format!( + "Failed to parse statefulset YAML for rename: {}", + err + )) + })?; + + update_object_meta(&mut statefulset.metadata, new_name, selector_labels); + + if let Some(spec) = statefulset.spec.as_mut() { + update_selector_labels(&mut spec.selector, selector_labels); + if let Some(template) = spec.template.metadata.as_mut() { + update_object_meta(template, new_name, selector_labels); + } + } + + *yaml = serde_yaml::to_string(&statefulset).map_err(|err| { + 
ApiError::InvalidRequest(format!( + "Failed to serialize renamed statefulset YAML: {}", + err + )) + })?; + Ok(()) +} + +fn rename_daemonset_yaml( + yaml: &mut String, + new_name: &str, + selector_labels: &BTreeMap, +) -> Result<(), ApiError> { + let mut daemonset: DaemonSet = serde_yaml::from_str(yaml).map_err(|err| { + ApiError::InvalidRequest(format!( + "Failed to parse daemonset YAML for rename: {}", + err + )) + })?; + + update_object_meta(&mut daemonset.metadata, new_name, selector_labels); + + if let Some(spec) = daemonset.spec.as_mut() { + update_selector_labels(&mut spec.selector, selector_labels); + if let Some(template) = spec.template.metadata.as_mut() { + update_object_meta(template, new_name, selector_labels); + } + } + + *yaml = serde_yaml::to_string(&daemonset).map_err(|err| { + ApiError::InvalidRequest(format!( + "Failed to serialize renamed daemonset YAML: {}", + err + )) + })?; + Ok(()) +} + +fn rename_replicaset_yaml( + yaml: &mut String, + new_name: &str, + selector_labels: &BTreeMap, +) -> Result<(), ApiError> { + let mut replicaset: ReplicaSet = serde_yaml::from_str(yaml).map_err(|err| { + ApiError::InvalidRequest(format!( + "Failed to parse replicaset YAML for rename: {}", + err + )) + })?; + + update_object_meta(&mut replicaset.metadata, new_name, selector_labels); + + if let Some(spec) = replicaset.spec.as_mut() { + update_selector_labels(&mut spec.selector, selector_labels); + if let Some(template) = spec.template.as_mut() { + if let Some(metadata) = template.metadata.as_mut() { + update_object_meta(metadata, new_name, selector_labels); + } + } + } + + *yaml = serde_yaml::to_string(&replicaset).map_err(|err| { + ApiError::InvalidRequest(format!( + "Failed to serialize renamed replicaset YAML: {}", + err + )) + })?; + Ok(()) +} + +fn rename_pod_yaml( + yaml: &mut String, + new_name: &str, + selector_labels: &BTreeMap, +) -> Result<(), ApiError> { + let mut pod: Pod = serde_yaml::from_str(yaml).map_err(|err| { + 
ApiError::InvalidRequest(format!("Failed to parse pod YAML for rename: {}", err)) + })?; + + update_object_meta(&mut pod.metadata, new_name, selector_labels); + + *yaml = serde_yaml::to_string(&pod).map_err(|err| { + ApiError::InvalidRequest(format!("Failed to serialize renamed pod YAML: {}", err)) + })?; + Ok(()) +} + +fn rename_job_yaml( + yaml: &mut String, + new_name: &str, + selector_labels: &BTreeMap, +) -> Result<(), ApiError> { + let mut job: Job = serde_yaml::from_str(yaml).map_err(|err| { + ApiError::InvalidRequest(format!("Failed to parse job YAML for rename: {}", err)) + })?; + + update_object_meta(&mut job.metadata, new_name, selector_labels); + + if let Some(spec) = job.spec.as_mut() { + update_optional_selector_labels(&mut spec.selector, selector_labels); + if let Some(template) = spec.template.metadata.as_mut() { + update_object_meta(template, new_name, selector_labels); + } + } + + *yaml = serde_yaml::to_string(&job).map_err(|err| { + ApiError::InvalidRequest(format!("Failed to serialize renamed job YAML: {}", err)) + })?; + Ok(()) +} + +fn rename_cronjob_yaml( + yaml: &mut String, + new_name: &str, + selector_labels: &BTreeMap, +) -> Result<(), ApiError> { + let mut cronjob: CronJob = serde_yaml::from_str(yaml).map_err(|err| { + ApiError::InvalidRequest(format!("Failed to parse cronjob YAML for rename: {}", err)) + })?; + + update_object_meta(&mut cronjob.metadata, new_name, selector_labels); + + if let Some(spec) = cronjob.spec.as_mut() { + let job_template = &mut spec.job_template; + if let Some(job_spec) = job_template.spec.as_mut() { + update_optional_selector_labels(&mut job_spec.selector, selector_labels); + if let Some(template) = job_spec.template.metadata.as_mut() { + update_object_meta(template, new_name, selector_labels); + } + } + } + + *yaml = serde_yaml::to_string(&cronjob).map_err(|err| { + ApiError::InvalidRequest(format!("Failed to serialize renamed cronjob YAML: {}", err)) + })?; + Ok(()) +} + +fn update_object_meta( + metadata: 
&mut ObjectMeta, + new_name: &str, + selector_labels: &BTreeMap, +) { + metadata.name = Some(new_name.to_string()); + ensure_labels_from_map(&mut metadata.labels, selector_labels); +} + +fn ensure_labels_from_map( + labels: &mut Option>, + desired: &BTreeMap, +) { + *labels = Some(desired.clone()); +} + +fn update_selector_labels(selector: &mut LabelSelector, desired: &BTreeMap) { + selector.match_labels = Some(desired.clone()); +} + +fn update_optional_selector_labels( + selector: &mut Option, + desired: &BTreeMap, +) { + match selector { + Some(existing) => update_selector_labels(existing, desired), + None => { + let mut new_selector = LabelSelector::default(); + update_selector_labels(&mut new_selector, desired); + *selector = Some(new_selector); + } + } +} + +fn to_btreemap(labels: &HashMap) -> BTreeMap { + labels + .iter() + .map(|(key, value)| (key.clone(), value.clone())) + .collect() +} diff --git a/crates/api/src/kube_controller/workload_yaml_cleaner.rs b/crates/api/src/kube_controller/workload_yaml_cleaner.rs new file mode 100644 index 0000000..a3c4ba6 --- /dev/null +++ b/crates/api/src/kube_controller/workload_yaml_cleaner.rs @@ -0,0 +1,413 @@ +use std::collections::HashMap; + +use anyhow::Result; +use k8s_openapi::{ + api::{ + apps::v1::{ + DaemonSet, DaemonSetSpec, Deployment, DeploymentSpec, ReplicaSet, ReplicaSetSpec, + StatefulSet, StatefulSetSpec, + }, + batch::v1::{CronJob, CronJobSpec, Job, JobSpec}, + core::v1::{Container, ContainerPort, EnvVar, EnvVarSource, Pod, PodSpec, PodTemplateSpec}, + }, + apimachinery::pkg::{api::resource::Quantity, apis::meta::v1::ObjectMeta}, +}; +use lapdev_common::kube::{KubeContainerImage, KubeContainerInfo, KubeWorkloadKind}; + +use super::resources::SidecarInjectionOptions; + +/// Rebuild workload YAML so it reflects the latest container configuration while +/// stripping server-managed metadata. Mirrors kube-manager's clean_* helpers. 
+pub fn rebuild_workload_yaml( + kind: &KubeWorkloadKind, + yaml: &str, + containers: &[KubeContainerInfo], + _sidecar: Option<&SidecarInjectionOptions<'_>>, +) -> Result { + match kind { + KubeWorkloadKind::Deployment => { + let deployment: Deployment = serde_yaml::from_str(yaml)?; + let cleaned = clean_deployment(deployment, containers); + Ok(serde_yaml::to_string(&cleaned)?) + } + KubeWorkloadKind::StatefulSet => { + let statefulset: StatefulSet = serde_yaml::from_str(yaml)?; + let cleaned = clean_statefulset(statefulset, containers); + Ok(serde_yaml::to_string(&cleaned)?) + } + KubeWorkloadKind::DaemonSet => { + let daemonset: DaemonSet = serde_yaml::from_str(yaml)?; + let cleaned = clean_daemonset(daemonset, containers); + Ok(serde_yaml::to_string(&cleaned)?) + } + KubeWorkloadKind::ReplicaSet => { + let replicaset: ReplicaSet = serde_yaml::from_str(yaml)?; + let cleaned = clean_replicaset(replicaset, containers); + Ok(serde_yaml::to_string(&cleaned)?) + } + KubeWorkloadKind::Pod => { + let pod: Pod = serde_yaml::from_str(yaml)?; + let cleaned = clean_pod(pod, containers); + Ok(serde_yaml::to_string(&cleaned)?) + } + KubeWorkloadKind::Job => { + let job: Job = serde_yaml::from_str(yaml)?; + let cleaned = clean_job(job, containers); + Ok(serde_yaml::to_string(&cleaned)?) + } + KubeWorkloadKind::CronJob => { + let cronjob: CronJob = serde_yaml::from_str(yaml)?; + let cleaned = clean_cronjob(cronjob, containers); + Ok(serde_yaml::to_string(&cleaned)?) 
+ } + } +} + +fn clean_metadata(metadata: ObjectMeta) -> ObjectMeta { + ObjectMeta { + name: metadata.name, + labels: metadata.labels, + ..Default::default() + } +} + +fn clean_deployment( + deployment: Deployment, + workload_containers: &[KubeContainerInfo], +) -> Deployment { + let clean_spec = deployment.spec.map(|original_spec| { + let template = merge_template_containers(original_spec.template, workload_containers); + + DeploymentSpec { + replicas: original_spec.replicas, + selector: original_spec.selector, + template, + min_ready_seconds: original_spec.min_ready_seconds, + paused: original_spec.paused, + progress_deadline_seconds: original_spec.progress_deadline_seconds, + revision_history_limit: original_spec.revision_history_limit, + strategy: original_spec.strategy, + } + }); + + Deployment { + metadata: clean_metadata(deployment.metadata), + spec: clean_spec, + status: None, + } +} + +fn clean_statefulset( + statefulset: StatefulSet, + workload_containers: &[KubeContainerInfo], +) -> StatefulSet { + let clean_spec = statefulset.spec.map(|original_spec| { + let template = merge_template_containers(original_spec.template, workload_containers); + + StatefulSetSpec { + service_name: original_spec.service_name, + replicas: original_spec.replicas, + selector: original_spec.selector, + template, + volume_claim_templates: original_spec.volume_claim_templates, + update_strategy: original_spec.update_strategy, + min_ready_seconds: original_spec.min_ready_seconds, + persistent_volume_claim_retention_policy: original_spec + .persistent_volume_claim_retention_policy, + ordinals: original_spec.ordinals, + revision_history_limit: original_spec.revision_history_limit, + pod_management_policy: original_spec.pod_management_policy, + } + }); + + StatefulSet { + metadata: clean_metadata(statefulset.metadata), + spec: clean_spec, + status: None, + } +} + +fn clean_daemonset(daemonset: DaemonSet, workload_containers: &[KubeContainerInfo]) -> DaemonSet { + let clean_spec = 
daemonset.spec.map(|original_spec| { + let template = merge_template_containers(original_spec.template, workload_containers); + + DaemonSetSpec { + selector: original_spec.selector, + template, + update_strategy: original_spec.update_strategy, + min_ready_seconds: original_spec.min_ready_seconds, + revision_history_limit: original_spec.revision_history_limit, + } + }); + + DaemonSet { + metadata: clean_metadata(daemonset.metadata), + spec: clean_spec, + status: None, + } +} + +fn clean_replicaset( + replicaset: ReplicaSet, + workload_containers: &[KubeContainerInfo], +) -> ReplicaSet { + let clean_spec = replicaset.spec.map(|original_spec| { + let template = original_spec + .template + .map(|t| merge_template_containers(t, workload_containers)); + + ReplicaSetSpec { + replicas: original_spec.replicas, + selector: original_spec.selector, + template, + min_ready_seconds: original_spec.min_ready_seconds, + } + }); + + ReplicaSet { + metadata: clean_metadata(replicaset.metadata), + spec: clean_spec, + status: None, + } +} + +fn clean_pod(pod: Pod, workload_containers: &[KubeContainerInfo]) -> Pod { + let clean_spec = pod.spec.map(|original_spec| { + let merged_containers = merge_containers(original_spec.containers, workload_containers); + + PodSpec { + active_deadline_seconds: original_spec.active_deadline_seconds, + containers: merged_containers, + init_containers: original_spec.init_containers, + ephemeral_containers: original_spec.ephemeral_containers, + volumes: original_spec.volumes, + restart_policy: original_spec.restart_policy, + termination_grace_period_seconds: original_spec.termination_grace_period_seconds, + dns_policy: original_spec.dns_policy, + dns_config: original_spec.dns_config, + node_selector: original_spec.node_selector, + service_account_name: None, + service_account: None, + automount_service_account_token: None, + security_context: original_spec.security_context, + image_pull_secrets: original_spec.image_pull_secrets, + affinity: 
original_spec.affinity, + tolerations: original_spec.tolerations, + topology_spread_constraints: original_spec.topology_spread_constraints, + priority_class_name: original_spec.priority_class_name, + priority: original_spec.priority, + preemption_policy: original_spec.preemption_policy, + overhead: original_spec.overhead, + enable_service_links: original_spec.enable_service_links, + os: original_spec.os, + host_users: original_spec.host_users, + scheduling_gates: original_spec.scheduling_gates, + resource_claims: original_spec.resource_claims, + ..Default::default() + } + }); + + Pod { + metadata: clean_metadata(pod.metadata), + spec: clean_spec, + status: None, + } +} + +fn clean_job(job: Job, workload_containers: &[KubeContainerInfo]) -> Job { + let clean_spec = job.spec.map(|original_spec| { + let template = merge_template_containers(original_spec.template, workload_containers); + + JobSpec { + template, + parallelism: original_spec.parallelism, + completions: original_spec.completions, + completion_mode: original_spec.completion_mode, + active_deadline_seconds: original_spec.active_deadline_seconds, + backoff_limit: original_spec.backoff_limit, + backoff_limit_per_index: original_spec.backoff_limit_per_index, + max_failed_indexes: original_spec.max_failed_indexes, + selector: original_spec.selector, + manual_selector: original_spec.manual_selector, + ttl_seconds_after_finished: original_spec.ttl_seconds_after_finished, + suspend: original_spec.suspend, + pod_failure_policy: original_spec.pod_failure_policy, + pod_replacement_policy: original_spec.pod_replacement_policy, + managed_by: original_spec.managed_by, + success_policy: original_spec.success_policy, + } + }); + + Job { + metadata: clean_metadata(job.metadata), + spec: clean_spec, + status: None, + } +} + +fn clean_cronjob(cronjob: CronJob, workload_containers: &[KubeContainerInfo]) -> CronJob { + let clean_spec = cronjob.spec.map(|original_spec| { + let mut job_template = original_spec.job_template; + if 
let Some(job_spec) = &mut job_template.spec { + job_spec.template = + merge_template_containers(job_spec.template.clone(), workload_containers); + } + + CronJobSpec { + schedule: original_spec.schedule, + time_zone: original_spec.time_zone, + starting_deadline_seconds: original_spec.starting_deadline_seconds, + concurrency_policy: original_spec.concurrency_policy, + suspend: original_spec.suspend, + job_template, + successful_jobs_history_limit: original_spec.successful_jobs_history_limit, + failed_jobs_history_limit: original_spec.failed_jobs_history_limit, + } + }); + + CronJob { + metadata: clean_metadata(cronjob.metadata), + spec: clean_spec, + status: None, + } +} + +fn merge_template_containers( + template: PodTemplateSpec, + workload_containers: &[KubeContainerInfo], +) -> PodTemplateSpec { + let pod_spec = template.spec.map(|original_pod_spec| { + let mut new_spec = original_pod_spec; + let merged_containers = merge_containers(new_spec.containers, workload_containers); + new_spec.containers = merged_containers; + new_spec.service_account_name = None; + new_spec.service_account = None; + new_spec.automount_service_account_token = None; + new_spec + }); + + PodTemplateSpec { + spec: pod_spec, + ..template + } +} + +fn merge_containers( + containers: Vec, + workload_containers: &[KubeContainerInfo], +) -> Vec { + containers + .into_iter() + .map(|container| { + if let Some(workload_container) = workload_containers + .iter() + .find(|wc| wc.name == container.name) + { + merge_single_container(container, workload_container) + } else { + container + } + }) + .collect() +} + +fn merge_single_container( + container: Container, + workload_container: &KubeContainerInfo, +) -> Container { + let mut new_container = container.clone(); + + match &workload_container.image { + KubeContainerImage::FollowOriginal => {} + KubeContainerImage::Custom(custom_image) => { + if !custom_image.is_empty() { + new_container.image = Some(custom_image.clone()); + } + } + } + + let mut 
resources = container.resources.unwrap_or_default(); + let mut requests = resources.requests.unwrap_or_default(); + let mut limits = resources.limits.unwrap_or_default(); + + if let Some(cpu_request) = &workload_container.cpu_request { + if !cpu_request.is_empty() { + requests.insert("cpu".to_string(), Quantity(cpu_request.clone())); + } + } + if let Some(memory_request) = &workload_container.memory_request { + if !memory_request.is_empty() { + requests.insert("memory".to_string(), Quantity(memory_request.clone())); + } + } + + if let Some(cpu_limit) = &workload_container.cpu_limit { + if !cpu_limit.is_empty() { + limits.insert("cpu".to_string(), Quantity(cpu_limit.clone())); + } + } + if let Some(memory_limit) = &workload_container.memory_limit { + if !memory_limit.is_empty() { + limits.insert("memory".to_string(), Quantity(memory_limit.clone())); + } + } + + resources.requests = if requests.is_empty() { + None + } else { + Some(requests) + }; + resources.limits = if limits.is_empty() { + None + } else { + Some(limits) + }; + new_container.resources = Some(resources); + + let mut env_map: HashMap, Option)> = HashMap::new(); + + if let Some(original_env) = container.env { + for env_var in original_env { + env_map.insert(env_var.name.clone(), (env_var.value, env_var.value_from)); + } + } + + for kube_env_var in &workload_container.env_vars { + env_map.insert( + kube_env_var.name.clone(), + (Some(kube_env_var.value.clone()), None), + ); + } + + let merged_env: Vec = env_map + .into_iter() + .map(|(name, (value, value_from))| EnvVar { + name, + value, + value_from, + }) + .collect(); + + new_container.env = if merged_env.is_empty() { + None + } else { + Some(merged_env) + }; + + // Preserve defined container ports; ensure they match desired ones + if !workload_container.ports.is_empty() { + let ports: Vec = workload_container + .ports + .iter() + .map(|port| ContainerPort { + name: port.name.clone(), + container_port: port.container_port, + protocol: 
port.protocol.clone(), + ..Default::default() + }) + .collect(); + new_container.ports = Some(ports); + } + + new_container +} diff --git a/crates/api/src/kube_controller/yaml_parser.rs b/crates/api/src/kube_controller/yaml_parser.rs new file mode 100644 index 0000000..48005d2 --- /dev/null +++ b/crates/api/src/kube_controller/yaml_parser.rs @@ -0,0 +1,257 @@ +use std::collections::{BTreeMap, HashSet}; + +use anyhow::anyhow; +use k8s_openapi::api::core::v1::PodSpec; +use k8s_openapi::api::{ + apps::v1::{DaemonSet, Deployment, ReplicaSet, StatefulSet}, + batch::v1::{CronJob, Job}, + core::v1::Pod, +}; +use lapdev_common::kube::{ + KubeContainerImage, KubeContainerInfo, KubeContainerPort, KubeServicePort, KubeWorkloadDetails, + KubeWorkloadKind, +}; +use lapdev_db::api::CachedClusterService; +use lapdev_kube_rpc::KubeRawWorkloadYaml; + +pub fn build_workload_details_from_yaml( + raw: KubeRawWorkloadYaml, + services: &[CachedClusterService], +) -> anyhow::Result { + let KubeRawWorkloadYaml { + name, + namespace, + kind, + workload_yaml, + } = raw; + + let (containers, labels) = extract_containers_and_labels(&kind, &workload_yaml)?; + let ports = ports_from_cached_services(&labels, services); + + Ok(KubeWorkloadDetails { + name, + namespace, + kind, + containers, + ports, + workload_yaml, + base_workload_id: None, + }) +} + +fn extract_containers_and_labels( + kind: &KubeWorkloadKind, + workload_yaml: &str, +) -> anyhow::Result<(Vec, BTreeMap)> { + match kind { + KubeWorkloadKind::Deployment => { + let deployment: Deployment = serde_yaml::from_str(workload_yaml)?; + let labels = deployment + .spec + .as_ref() + .and_then(|s| s.template.metadata.as_ref()) + .and_then(|m| m.labels.clone()) + .unwrap_or_default(); + let pod_spec = deployment + .spec + .as_ref() + .and_then(|s| s.template.spec.as_ref()) + .ok_or_else(|| anyhow!("Deployment missing pod spec"))?; + let containers = extract_pod_spec_containers(pod_spec)?; + Ok((containers, labels)) + } + 
KubeWorkloadKind::StatefulSet => { + let statefulset: StatefulSet = serde_yaml::from_str(workload_yaml)?; + let labels = statefulset + .spec + .as_ref() + .and_then(|s| s.template.metadata.as_ref()) + .and_then(|m| m.labels.clone()) + .unwrap_or_default(); + let pod_spec = statefulset + .spec + .as_ref() + .and_then(|s| s.template.spec.as_ref()) + .ok_or_else(|| anyhow!("StatefulSet missing pod spec"))?; + let containers = extract_pod_spec_containers(pod_spec)?; + Ok((containers, labels)) + } + KubeWorkloadKind::DaemonSet => { + let daemonset: DaemonSet = serde_yaml::from_str(workload_yaml)?; + let labels = daemonset + .spec + .as_ref() + .and_then(|s| s.template.metadata.as_ref()) + .and_then(|m| m.labels.clone()) + .unwrap_or_default(); + let pod_spec = daemonset + .spec + .as_ref() + .and_then(|s| s.template.spec.as_ref()) + .ok_or_else(|| anyhow!("DaemonSet missing pod spec"))?; + let containers = extract_pod_spec_containers(pod_spec)?; + Ok((containers, labels)) + } + KubeWorkloadKind::ReplicaSet => { + let replicaset: ReplicaSet = serde_yaml::from_str(workload_yaml)?; + let labels = replicaset + .spec + .as_ref() + .and_then(|s| s.template.as_ref()) + .and_then(|t| t.metadata.as_ref()) + .and_then(|m| m.labels.clone()) + .unwrap_or_default(); + let pod_spec = replicaset + .spec + .as_ref() + .and_then(|s| s.template.as_ref()) + .and_then(|t| t.spec.as_ref()) + .ok_or_else(|| anyhow!("ReplicaSet missing pod spec"))?; + let containers = extract_pod_spec_containers(pod_spec)?; + Ok((containers, labels)) + } + KubeWorkloadKind::Pod => { + let pod: Pod = serde_yaml::from_str(workload_yaml)?; + let labels = pod.metadata.labels.clone().unwrap_or_default(); + let pod_spec = pod + .spec + .as_ref() + .ok_or_else(|| anyhow!("Pod missing spec"))?; + let containers = extract_pod_spec_containers(pod_spec)?; + Ok((containers, labels)) + } + KubeWorkloadKind::Job => { + let job: Job = serde_yaml::from_str(workload_yaml)?; + let labels = job + .spec + .as_ref() + 
.and_then(|s| s.template.metadata.as_ref()) + .and_then(|m| m.labels.clone()) + .unwrap_or_default(); + let pod_spec = job + .spec + .as_ref() + .and_then(|s| s.template.spec.as_ref()) + .ok_or_else(|| anyhow!("Job missing pod spec"))?; + let containers = extract_pod_spec_containers(pod_spec)?; + Ok((containers, labels)) + } + KubeWorkloadKind::CronJob => { + let cronjob: CronJob = serde_yaml::from_str(workload_yaml)?; + let labels = cronjob + .spec + .as_ref() + .and_then(|s| s.job_template.spec.as_ref()) + .and_then(|js| js.template.metadata.as_ref()) + .and_then(|m| m.labels.clone()) + .unwrap_or_default(); + let pod_spec = cronjob + .spec + .as_ref() + .and_then(|s| s.job_template.spec.as_ref()) + .and_then(|js| js.template.spec.as_ref()) + .ok_or_else(|| anyhow!("CronJob missing pod spec"))?; + let containers = extract_pod_spec_containers(pod_spec)?; + Ok((containers, labels)) + } + } +} + +fn extract_pod_spec_containers(pod_spec: &PodSpec) -> anyhow::Result> { + pod_spec + .containers + .iter() + .map(|container| { + let mut cpu_request = None; + let mut cpu_limit = None; + let mut memory_request = None; + let mut memory_limit = None; + + if let Some(resources) = &container.resources { + if let Some(requests) = &resources.requests { + if let Some(cpu_req) = requests.get("cpu") { + cpu_request = Some(cpu_req.0.clone()); + } + if let Some(memory_req) = requests.get("memory") { + memory_request = Some(memory_req.0.clone()); + } + } + + if let Some(limits) = &resources.limits { + if let Some(cpu_lim) = limits.get("cpu") { + cpu_limit = Some(cpu_lim.0.clone()); + } + if let Some(memory_lim) = limits.get("memory") { + memory_limit = Some(memory_lim.0.clone()); + } + } + } + + let image = container + .image + .clone() + .ok_or_else(|| anyhow!("Container '{}' has no image specified", container.name))?; + + let ports = container + .ports + .as_ref() + .map(|ports| { + ports + .iter() + .map(|port| KubeContainerPort { + name: port.name.clone(), + container_port: 
port.container_port, + protocol: port.protocol.clone(), + }) + .collect::>() + }) + .unwrap_or_default(); + + Ok(KubeContainerInfo { + name: container.name.clone(), + original_image: image.clone(), + image: KubeContainerImage::FollowOriginal, + cpu_request, + cpu_limit, + memory_request, + memory_limit, + env_vars: Vec::new(), + original_env_vars: Vec::new(), + ports, + }) + }) + .collect() +} + +fn ports_from_cached_services( + workload_labels: &BTreeMap, + services: &[CachedClusterService], +) -> Vec { + let mut seen = HashSet::new(); + let mut ports = Vec::new(); + + for service in services { + if service.selector.is_empty() { + continue; + } + + let matches = service + .selector + .iter() + .all(|(key, value)| workload_labels.get(key).map_or(false, |v| v == value)); + + if !matches { + continue; + } + + for port in &service.ports { + let original_target = port.original_target_port.or(port.target_port); + let key = (port.port, original_target, port.protocol.clone()); + if seen.insert(key) { + ports.push(port.clone()); + } + } + } + + ports +} diff --git a/lapdev-api/src/lib.rs b/crates/api/src/lib.rs similarity index 52% rename from lapdev-api/src/lib.rs rename to crates/api/src/lib.rs index a0d05bf..155fd39 100644 --- a/lapdev-api/src/lib.rs +++ b/crates/api/src/lib.rs @@ -1,9 +1,19 @@ pub mod account; pub mod admin; +pub mod app_catalog_events; pub mod auth; pub mod cert; +pub mod cli_auth; +pub mod cluster_events; +pub mod devbox; +pub mod devbox_auth; +pub mod devbox_tunnels; +pub mod environment_events; pub mod github; pub mod gitlab; +pub mod hrpc_service; +pub mod kube; +pub mod kube_controller; pub mod machine_type; pub mod organization; pub mod prebuild; @@ -13,4 +23,5 @@ pub mod server; pub mod session; pub mod state; pub mod websocket; +pub mod websocket_socket; pub mod workspace; diff --git a/lapdev-api/src/machine_type.rs b/crates/api/src/machine_type.rs similarity index 90% rename from lapdev-api/src/machine_type.rs rename to 
crates/api/src/machine_type.rs index 6a630f7..08b33e7 100644 --- a/lapdev-api/src/machine_type.rs +++ b/crates/api/src/machine_type.rs @@ -1,3 +1,5 @@ +use std::sync::Arc; + use axum::{ extract::{Path, State}, response::{IntoResponse, Response}, @@ -7,14 +9,13 @@ use axum_extra::{headers::Cookie, TypedHeader}; use chrono::Utc; use hyper::StatusCode; use lapdev_common::{CreateMachineType, MachineType, UpdateMachineType}; -use lapdev_db::entities; use lapdev_rpc::error::ApiError; use sea_orm::{ActiveModelTrait, ActiveValue}; use uuid::Uuid; use crate::state::CoreState; -pub async fn all_machine_types(State(state): State) -> Result { +pub async fn all_machine_types(State(state): State>) -> Result { let machine_types = state.db.get_all_machine_types().await?; let machine_types: Vec = machine_types .into_iter() @@ -33,7 +34,7 @@ pub async fn all_machine_types(State(state): State) -> Result, - State(state): State, + State(state): State>, Json(create_machine_type): Json, ) -> Result, ApiError> { state.authenticate_cluster_admin(&cookie).await?; @@ -55,7 +56,7 @@ pub async fn create_machine_type( false }; - let machine_type = entities::machine_type::ActiveModel { + let machine_type = lapdev_db_entities::machine_type::ActiveModel { id: ActiveValue::Set(Uuid::new_v4()), deleted_at: ActiveValue::Set(None), name: ActiveValue::Set(create_machine_type.name), @@ -82,12 +83,12 @@ pub async fn create_machine_type( pub async fn update_machine_type( TypedHeader(cookie): TypedHeader, Path(machine_type_id): Path, - State(state): State, + State(state): State>, Json(update_machine_type): Json, ) -> Result, ApiError> { state.authenticate_cluster_admin(&cookie).await?; - let machine_type = entities::machine_type::ActiveModel { + let machine_type = lapdev_db_entities::machine_type::ActiveModel { id: ActiveValue::Set(machine_type_id), name: ActiveValue::Set(update_machine_type.name), cost_per_second: ActiveValue::Set(update_machine_type.cost_per_second as i32), @@ -110,7 +111,7 @@ pub async 
fn update_machine_type( pub async fn delete_machine_type( TypedHeader(cookie): TypedHeader, Path(machine_type_id): Path, - State(state): State, + State(state): State>, ) -> Result { state.authenticate_cluster_admin(&cookie).await?; @@ -121,7 +122,7 @@ pub async fn delete_machine_type( )); } - entities::machine_type::ActiveModel { + lapdev_db_entities::machine_type::ActiveModel { id: ActiveValue::Set(machine_type_id), deleted_at: ActiveValue::Set(Some(Utc::now().into())), ..Default::default() diff --git a/lapdev-api/src/organization.rs b/crates/api/src/organization.rs similarity index 90% rename from lapdev-api/src/organization.rs rename to crates/api/src/organization.rs index 4453bbd..72fc597 100644 --- a/lapdev-api/src/organization.rs +++ b/crates/api/src/organization.rs @@ -1,4 +1,4 @@ -use std::{str::FromStr, time::Duration}; +use std::{str::FromStr, sync::Arc, time::Duration}; use axum::{ extract::{Path, Query, State}, @@ -14,7 +14,6 @@ use lapdev_common::{ UpdateOrgQuota, UpdateOrganizationAutoStartStop, UpdateOrganizationMember, UpdateOrganizationName, UsageRequest, UsageResult, UserRole, }; -use lapdev_db::entities; use lapdev_rpc::error::ApiError; use sea_orm::{ ActiveModelTrait, ActiveValue, ColumnTrait, EntityTrait, QueryFilter, TransactionTrait, @@ -25,7 +24,7 @@ use crate::state::{CoreState, RequestInfo}; pub async fn create_organization( TypedHeader(cookie): TypedHeader, - State(state): State, + State(state): State>, info: RequestInfo, Json(org): Json, ) -> Result { @@ -45,7 +44,7 @@ pub async fn create_organization( .create_new_organization(&txn, name.to_string()) .await?; - entities::organization_member::ActiveModel { + lapdev_db_entities::organization_member::ActiveModel { created_at: ActiveValue::Set(Utc::now().into()), user_id: ActiveValue::Set(user.id), organization_id: ActiveValue::Set(org.id), @@ -55,7 +54,7 @@ pub async fn create_organization( .insert(&txn) .await?; - entities::user::ActiveModel { + lapdev_db_entities::user::ActiveModel { id: 
ActiveValue::Set(user.id), current_organization: ActiveValue::Set(org.id), ..Default::default() @@ -97,7 +96,7 @@ pub async fn create_organization( pub async fn delete_organization( TypedHeader(cookie): TypedHeader, Path(org_id): Path, - State(state): State, + State(state): State>, info: RequestInfo, ) -> Result { let user = state.authenticate(&cookie).await?; @@ -121,7 +120,7 @@ pub async fn delete_organization( let now = Utc::now(); let txn = state.db.conn.begin().await?; - entities::organization_member::ActiveModel { + lapdev_db_entities::organization_member::ActiveModel { id: ActiveValue::Set(member.id), deleted_at: ActiveValue::Set(Some(now.into())), ..Default::default() @@ -129,7 +128,7 @@ pub async fn delete_organization( .update(&txn) .await?; - entities::organization::ActiveModel { + lapdev_db_entities::organization::ActiveModel { id: ActiveValue::Set(org.id), deleted_at: ActiveValue::Set(Some(now.into())), ..Default::default() @@ -162,7 +161,7 @@ pub async fn delete_organization( pub async fn update_org_name( TypedHeader(cookie): TypedHeader, Path(org_id): Path, - State(state): State, + State(state): State>, info: RequestInfo, Json(update_org): Json, ) -> Result { @@ -180,7 +179,7 @@ pub async fn update_org_name( let now = Utc::now(); let txn = state.db.conn.begin().await?; - entities::organization::ActiveModel { + lapdev_db_entities::organization::ActiveModel { id: ActiveValue::Set(org_id), name: ActiveValue::Set(update_org.name), ..Default::default() @@ -213,7 +212,7 @@ pub async fn update_org_name( pub async fn update_org_auto_start_stop( TypedHeader(cookie): TypedHeader, Path(org_id): Path, - State(state): State, + State(state): State>, info: RequestInfo, Json(update_org): Json, ) -> Result { @@ -232,7 +231,7 @@ pub async fn update_org_auto_start_stop( let now = Utc::now(); let txn = state.db.conn.begin().await?; - entities::organization::ActiveModel { + lapdev_db_entities::organization::ActiveModel { id: ActiveValue::Set(org_id), auto_start: 
ActiveValue::Set(update_org.auto_start), auto_stop: ActiveValue::Set(update_org.auto_stop), @@ -272,14 +271,14 @@ pub async fn update_org_auto_start_stop( pub async fn join_organization( TypedHeader(cookie): TypedHeader, Path(invitation_id): Path, - State(state): State, + State(state): State>, info: RequestInfo, ) -> Result { let user = state.authenticate(&cookie).await?; - let invitation = entities::user_invitation::Entity::find() - .filter(entities::user_invitation::Column::Id.eq(invitation_id)) - .filter(entities::user_invitation::Column::UsedAt.is_null()) + let invitation = lapdev_db_entities::user_invitation::Entity::find() + .filter(lapdev_db_entities::user_invitation::Column::Id.eq(invitation_id)) + .filter(lapdev_db_entities::user_invitation::Column::UsedAt.is_null()) .one(&state.db.conn) .await? .ok_or_else(|| ApiError::InvalidRequest("invitation id is invalid".to_string()))?; @@ -290,10 +289,10 @@ pub async fn join_organization( } let org_id = invitation.organization_id; - let member = entities::organization_member::Entity::find() - .filter(entities::organization_member::Column::UserId.eq(user.id)) - .filter(entities::organization_member::Column::OrganizationId.eq(org_id)) - .filter(entities::organization_member::Column::DeletedAt.is_null()) + let member = lapdev_db_entities::organization_member::Entity::find() + .filter(lapdev_db_entities::organization_member::Column::UserId.eq(user.id)) + .filter(lapdev_db_entities::organization_member::Column::OrganizationId.eq(org_id)) + .filter(lapdev_db_entities::organization_member::Column::DeletedAt.is_null()) .one(&state.db.conn) .await?; if member.is_some() { @@ -304,7 +303,7 @@ pub async fn join_organization( let now = Utc::now(); let txn = state.db.conn.begin().await?; - entities::organization_member::ActiveModel { + lapdev_db_entities::organization_member::ActiveModel { created_at: ActiveValue::Set(now.into()), user_id: ActiveValue::Set(user.id), organization_id: ActiveValue::Set(org_id), @@ -314,7 +313,7 @@ 
pub async fn join_organization( .insert(&txn) .await?; - entities::user::ActiveModel { + lapdev_db_entities::user::ActiveModel { id: ActiveValue::Set(user.id), current_organization: ActiveValue::Set(org_id), ..Default::default() @@ -322,7 +321,7 @@ pub async fn join_organization( .update(&txn) .await?; - entities::user_invitation::ActiveModel { + lapdev_db_entities::user_invitation::ActiveModel { id: ActiveValue::Set(invitation_id), used_at: ActiveValue::Set(Some(now.into())), ..Default::default() @@ -354,7 +353,7 @@ pub async fn join_organization( pub async fn get_organization_members( TypedHeader(cookie): TypedHeader, Path(org_id): Path, - State(state): State, + State(state): State>, ) -> Result>, ApiError> { let user = state.authenticate(&cookie).await?; let member = state @@ -389,7 +388,7 @@ pub async fn get_organization_members( pub async fn update_organization_member( TypedHeader(cookie): TypedHeader, Path((org_id, member_user_id)): Path<(Uuid, Uuid)>, - State(state): State, + State(state): State>, info: RequestInfo, Json(update_org_member): Json, ) -> Result, ApiError> { @@ -421,7 +420,7 @@ pub async fn update_organization_member( let now = Utc::now(); let txn = state.db.conn.begin().await?; - let member = entities::organization_member::ActiveModel { + let member = lapdev_db_entities::organization_member::ActiveModel { id: ActiveValue::Set(member.id), role: ActiveValue::Set(update_org_member.role.to_string()), ..Default::default() @@ -459,7 +458,7 @@ pub async fn update_organization_member( pub async fn delete_organization_member( TypedHeader(cookie): TypedHeader, Path((org_id, member_user_id)): Path<(Uuid, Uuid)>, - State(state): State, + State(state): State>, info: RequestInfo, ) -> Result { let user = state.authenticate(&cookie).await?; @@ -487,7 +486,7 @@ pub async fn delete_organization_member( let now = Utc::now(); let txn = state.db.conn.begin().await?; - entities::organization_member::ActiveModel { + 
lapdev_db_entities::organization_member::ActiveModel { id: ActiveValue::Set(member.id), deleted_at: ActiveValue::Set(Some(now.into())), ..Default::default() @@ -519,7 +518,7 @@ pub async fn delete_organization_member( pub async fn create_user_invitation( TypedHeader(cookie): TypedHeader, Path(org_id): Path, - State(state): State, + State(state): State>, ) -> Result { let user = state.authenticate(&cookie).await?; let member = state @@ -533,7 +532,7 @@ pub async fn create_user_invitation( let org = state.db.get_organization(member.organization_id).await?; let now = Utc::now(); - let invitation = entities::user_invitation::ActiveModel { + let invitation = lapdev_db_entities::user_invitation::ActiveModel { id: ActiveValue::Set(Uuid::new_v4()), created_at: ActiveValue::Set(now.into()), expires_at: ActiveValue::Set((now + Duration::from_secs(1800)).into()), @@ -549,7 +548,7 @@ pub async fn create_user_invitation( pub async fn get_organization_usage( TypedHeader(cookie): TypedHeader, Path(org_id): Path, - State(state): State, + State(state): State>, Query(usage_request): Query, ) -> Result, ApiError> { state.require_enterprise().await?; @@ -584,7 +583,7 @@ pub async fn get_organization_usage( pub async fn get_organization_audit_log( TypedHeader(cookie): TypedHeader, Path(org_id): Path, - State(state): State, + State(state): State>, Query(audit_log_request): Query, ) -> Result, ApiError> { state.require_enterprise().await?; @@ -617,7 +616,7 @@ pub async fn get_organization_audit_log( pub async fn get_organization_quota( TypedHeader(cookie): TypedHeader, Path(org_id): Path, - State(state): State, + State(state): State>, ) -> Result, ApiError> { state.require_enterprise().await?; let user = state.authenticate(&cookie).await?; @@ -644,7 +643,7 @@ pub async fn get_organization_quota( pub async fn update_organization_quota( TypedHeader(cookie): TypedHeader, Path(org_id): Path, - State(state): State, + State(state): State>, info: RequestInfo, Json(update_quota): Json, ) -> 
Result { @@ -703,14 +702,14 @@ pub async fn update_organization_quota( pub async fn clear_organization( TypedHeader(cookie): TypedHeader, Path(org_id): Path, - State(state): State, + State(state): State>, ) -> Result { state.authenticate_cluster_admin(&cookie).await?; state.db.get_organization(org_id).await?; - let models = entities::workspace::Entity::find() - .filter(entities::workspace::Column::OrganizationId.eq(org_id)) - .filter(entities::workspace::Column::DeletedAt.is_null()) + let models = lapdev_db_entities::workspace::Entity::find() + .filter(lapdev_db_entities::workspace::Column::OrganizationId.eq(org_id)) + .filter(lapdev_db_entities::workspace::Column::DeletedAt.is_null()) .all(&state.db.conn) .await?; diff --git a/lapdev-api/src/prebuild.rs b/crates/api/src/prebuild.rs similarity index 100% rename from lapdev-api/src/prebuild.rs rename to crates/api/src/prebuild.rs diff --git a/lapdev-api/src/project.rs b/crates/api/src/project.rs similarity index 94% rename from lapdev-api/src/project.rs rename to crates/api/src/project.rs index 650441d..cf7059a 100644 --- a/lapdev-api/src/project.rs +++ b/crates/api/src/project.rs @@ -1,4 +1,4 @@ -use std::str::FromStr; +use std::{str::FromStr, sync::Arc}; use axum::{ extract::{Path, State}, @@ -12,7 +12,6 @@ use lapdev_common::{ AuditAction, AuditResourceKind, NewProject, NewProjectPrebuild, PrebuildStatus, ProjectInfo, ProjectPrebuild, UserRole, }; -use lapdev_db::entities; use lapdev_rpc::error::ApiError; use sea_orm::{ActiveModelTrait, ActiveValue, TransactionTrait}; use uuid::Uuid; @@ -22,7 +21,7 @@ use crate::state::{CoreState, RequestInfo}; pub async fn create_project( TypedHeader(cookie): TypedHeader, Path(org_id): Path, - State(state): State, + State(state): State>, info: RequestInfo, Json(project): Json, ) -> Result { @@ -44,7 +43,7 @@ pub async fn create_project( } pub async fn all_projects( - State(state): State, + State(state): State>, Path(org_id): Path, TypedHeader(cookie): TypedHeader, ) -> Result { 
@@ -72,7 +71,7 @@ pub async fn all_projects( pub async fn get_project( TypedHeader(cookie): TypedHeader, Path((org_id, project_id)): Path<(Uuid, Uuid)>, - State(state): State, + State(state): State>, ) -> Result, ApiError> { let (_, project) = state.get_project(&cookie, org_id, project_id).await?; @@ -90,7 +89,7 @@ pub async fn get_project( pub async fn delete_project( TypedHeader(cookie): TypedHeader, Path((org_id, project_id)): Path<(Uuid, Uuid)>, - State(state): State, + State(state): State>, info: RequestInfo, ) -> Result { let (user, project) = state.get_project(&cookie, org_id, project_id).await?; @@ -118,7 +117,7 @@ pub async fn delete_project( pub async fn get_project_branches( TypedHeader(cookie): TypedHeader, Path((org_id, project_id)): Path<(Uuid, Uuid)>, - State(state): State, + State(state): State>, ) -> Result { let (user, project) = state.get_project(&cookie, org_id, project_id).await?; let auth = if let Ok(Some(oauth)) = state.db.get_oauth(project.oauth_id).await { @@ -137,7 +136,7 @@ pub async fn get_project_branches( pub async fn get_project_prebuilds( TypedHeader(cookie): TypedHeader, Path((org_id, project_id)): Path<(Uuid, Uuid)>, - State(state): State, + State(state): State>, ) -> Result { let (_, project) = state.get_project(&cookie, org_id, project_id).await?; let prebuilds = state.db.get_prebuilds(project.id).await?; @@ -160,7 +159,7 @@ pub async fn get_project_prebuilds( pub async fn create_project_prebuild( TypedHeader(cookie): TypedHeader, Path((org_id, project_id)): Path<(Uuid, Uuid)>, - State(state): State, + State(state): State>, info: RequestInfo, Json(prebuild): Json, ) -> Result { @@ -190,7 +189,7 @@ pub async fn create_project_prebuild( pub async fn delete_project_prebuild( TypedHeader(cookie): TypedHeader, Path((org_id, project_id, prebuild_id)): Path<(Uuid, Uuid, Uuid)>, - State(state): State, + State(state): State>, info: RequestInfo, ) -> Result { let (user, project) = state.get_project(&cookie, org_id, project_id).await?; @@ 
-219,7 +218,7 @@ pub async fn delete_project_prebuild( pub async fn update_project_machine_type( TypedHeader(cookie): TypedHeader, Path((org_id, project_id, machine_type_id)): Path<(Uuid, Uuid, Uuid)>, - State(state): State, + State(state): State>, info: RequestInfo, ) -> Result { let (user, project) = state.get_project(&cookie, org_id, project_id).await?; @@ -243,7 +242,7 @@ pub async fn update_project_machine_type( .ok_or_else(|| ApiError::InvalidRequest("Machine type doesn't exist".to_string()))?; let txn = state.db.conn.begin().await?; - entities::project::ActiveModel { + lapdev_db_entities::project::ActiveModel { id: ActiveValue::Set(project.id), machine_type_id: ActiveValue::Set(machine_type.id), ..Default::default() @@ -274,7 +273,7 @@ pub async fn update_project_machine_type( pub async fn get_project_env( TypedHeader(cookie): TypedHeader, Path((org_id, project_id)): Path<(Uuid, Uuid)>, - State(state): State, + State(state): State>, ) -> Result>, ApiError> { let (user, project) = state.get_project(&cookie, org_id, project_id).await?; state @@ -292,7 +291,7 @@ pub async fn get_project_env( pub async fn update_project_env( TypedHeader(cookie): TypedHeader, Path((org_id, project_id)): Path<(Uuid, Uuid)>, - State(state): State, + State(state): State>, info: RequestInfo, Json(envs): Json>, ) -> Result { @@ -314,7 +313,7 @@ pub async fn update_project_env( let env = serde_json::to_string(&envs)?; let txn = state.db.conn.begin().await?; - entities::project::ActiveModel { + lapdev_db_entities::project::ActiveModel { id: ActiveValue::Set(project.id), env: ActiveValue::Set(Some(env)), ..Default::default() diff --git a/crates/api/src/router.rs b/crates/api/src/router.rs new file mode 100644 index 0000000..66d128f --- /dev/null +++ b/crates/api/src/router.rs @@ -0,0 +1,749 @@ +use std::sync::Arc; + +use axum::{ + debug_handler, + extract::{FromRequestParts, Query, Request, State, WebSocketUpgrade}, + middleware::{self, Next}, + response::{IntoResponse, Response}, + 
routing::{any, delete, get, post, put}, + Json, Router, +}; +use axum_client_ip::ClientIpSource; +use axum_extra::{extract::Host, headers, TypedHeader}; +use hyper::StatusCode; +use lapdev_api_hrpc::{HrpcService, HrpcServiceResponse}; +use lapdev_common::{error_page::render_error_page, WorkspaceStatus}; +use lapdev_proxy_http::{forward::ProxyForward, proxy::WorkspaceForwardError}; +use lapdev_rpc::error::ApiError; +use serde::Deserialize; + +use crate::{ + account, admin, app_catalog_events, cli_auth, cluster_events, + devbox::{ + devbox_client_tunnel_websocket, devbox_intercept_tunnel_websocket, devbox_rpc_websocket, + devbox_whoami, + }, + environment_events, + kube::{ + devbox_proxy_tunnel_websocket, kube_cluster_rpc_websocket, kube_data_plane_websocket, + sidecar_tunnel_websocket, + }, + kube_controller::container_images, + machine_type, organization, project, + session::{logout, new_session, session_authorize}, + state::CoreState, + websocket::handle_websocket, + workspace, +}; + +static STATIC_DIR: include_dir::Dir = + include_dir::include_dir!("$CARGO_MANIFEST_DIR/../dashboard/dist"); + +fn preview_error_response(message: impl Into) -> axum::response::Html { + let message = message.into(); + axum::response::Html(render_error_page(&message)) +} + +fn private_routes() -> Router> { + Router::new() + .route("/session", get(new_session)) + .route("/session", delete(logout)) + .route("/session/authorize", get(session_authorize)) + .route("/me", get(account::me)) + .route( + "/me/organization/{org_id}", + put(account::set_current_organization), + ) +} + +async fn not_found() -> impl IntoResponse { + StatusCode::NOT_FOUND +} + +fn v1_api_routes() -> Router> { + Router::new() + .route("/", any(not_found)) + .route("/{*wildcard}", any(not_found)) + .route("/cluster_info", get(admin::get_cluster_info)) + .route("/auth_providers", get(admin::get_auth_providers)) + .route("/hostnames", get(admin::get_hostnames)) + .route("/machine_types", 
get(machine_type::all_machine_types)) + // CLI authentication endpoints + .route( + "/auth/cli/generate-token", + post(cli_auth::generate_cli_token), + ) + .route("/auth/cli/token", get(cli_auth::cli_token_poll)) + .route("/devbox/whoami", get(devbox_whoami)) + .route( + "/join/{invitation_id}", + put(organization::join_organization), + ) + .route("/kube/cluster/rpc", any(kube_cluster_rpc_websocket)) + .route("/kube/cluster/tunnel", any(kube_data_plane_websocket)) + .route("/kube/devbox/rpc", any(devbox_rpc_websocket)) + .route( + "/kube/devbox/tunnel/intercept/{user_id}", + any(devbox_intercept_tunnel_websocket), + ) + .route( + "/kube/devbox/tunnel/client/{user_id}", + any(devbox_client_tunnel_websocket), + ) + .route( + "/kube/sidecar/tunnel/{environment_id}/{workload_id}", + any(sidecar_tunnel_websocket), + ) + .route( + "/kube/devbox-proxy/tunnel/{environment_id}", + any(devbox_proxy_tunnel_websocket), + ) + .route("/organizations", post(organization::create_organization)) + .route( + "/organizations/{org_id}", + delete(organization::delete_organization), + ) + .route( + "/organizations/{org_id}/name", + put(organization::update_org_name), + ) + .route( + "/organizations/{org_id}/auto_start_stop", + put(organization::update_org_auto_start_stop), + ) + .route( + "/organizations/{org_id}/members", + get(organization::get_organization_members), + ) + .route( + "/organizations/{org_id}/members/{user_id}", + put(organization::update_organization_member), + ) + .route( + "/organizations/{org_id}/members/{user_id}", + delete(organization::delete_organization_member), + ) + .route( + "/organizations/{org_id}/invitations", + post(organization::create_user_invitation), + ) + .route( + "/organizations/{org_id}/usage", + get(organization::get_organization_usage), + ) + .route( + "/organizations/{org_id}/audit_logs", + get(organization::get_organization_audit_log), + ) + .route( + "/organizations/{org_id}/quota", + get(organization::get_organization_quota), + ) + .route( + 
"/organizations/{org_id}/quota", + put(organization::update_organization_quota), + ) + .route( + "/organizations/{org_id}/kube/environments/{environment_id}/events", + get(environment_events::stream_environment_events), + ) + .route( + "/organizations/{org_id}/kube/environments/{environment_id}/workloads/events", + get(environment_events::stream_environment_workload_events), + ) + .route( + "/organizations/{org_id}/kube/environments/events", + get(environment_events::stream_organization_environment_events), + ) + .route( + "/organizations/{org_id}/kube/clusters/{cluster_id}/events", + get(cluster_events::stream_cluster_status_events), + ) + .route( + "/organizations/{org_id}/kube/clusters/events", + get(cluster_events::stream_organization_cluster_status_events), + ) + .route( + "/organizations/{org_id}/kube/catalogs/{catalog_id}/events", + get(app_catalog_events::stream_app_catalog_events), + ) + .route( + "/organizations/{org_id}/kube/catalogs/events", + get(app_catalog_events::stream_organization_app_catalog_events), + ) + .route( + "/organizations/{org_id}/projects", + post(project::create_project), + ) + .route( + "/organizations/{org_id}/projects", + get(project::all_projects), + ) + .route( + "/organizations/{org_id}/projects/{project_id}", + get(project::get_project), + ) + .route( + "/organizations/{org_id}/projects/{project_id}", + delete(project::delete_project), + ) + .route( + "/organizations/{org_id}/projects/{project_id}/branches", + get(project::get_project_branches), + ) + .route( + "/organizations/{org_id}/projects/{project_id}/prebuilds", + get(project::get_project_prebuilds), + ) + .route( + "/organizations/{org_id}/projects/{project_id}/prebuilds", + post(project::create_project_prebuild), + ) + .route( + "/organizations/{org_id}/projects/{project_id}/prebuilds/{prebuild_id}", + delete(project::delete_project_prebuild), + ) + .route( + "/organizations/{org_id}/projects/{project_id}/machine_type/{machine_type_id}", + 
put(project::update_project_machine_type), + ) + .route( + "/organizations/{org_id}/projects/{project_id}/env", + get(project::get_project_env), + ) + .route( + "/organizations/{org_id}/projects/{project_id}/env", + put(project::update_project_env), + ) + .route( + "/organizations/{org_id}/workspaces", + post(workspace::create_workspace), + ) + .route( + "/organizations/{org_id}/workspaces", + get(workspace::all_workspaces), + ) + .route( + "/organizations/{org_id}/workspaces/{workspace_name}", + get(workspace::get_workspace), + ) + .route( + "/organizations/{org_id}/workspaces/{workspace_name}", + delete(workspace::delete_workspace), + ) + .route( + "/organizations/{org_id}/workspaces/{workspace_name}/start", + post(workspace::start_workspace), + ) + .route( + "/organizations/{org_id}/workspaces/{workspace_name}/stop", + post(workspace::stop_workspace), + ) + .route( + "/organizations/{org_id}/workspaces/{workspace_name}/pin", + post(workspace::pin_workspace), + ) + .route( + "/organizations/{org_id}/workspaces/{workspace_name}/unpin", + post(workspace::unpin_workspace), + ) + .route( + "/organizations/{org_id}/workspaces/{workspace_name}/rebuild", + post(workspace::rebuild_workspace), + ) + .route( + "/organizations/{org_id}/workspaces/{workspace_name}/ports", + get(workspace::workspace_ports), + ) + .route( + "/organizations/{org_id}/workspaces/{workspace_name}/ports/{port}", + put(workspace::update_workspace_port), + ) + .route("/account/ssh_keys", post(account::create_ssh_key)) + .route("/account/ssh_keys", get(account::all_ssh_keys)) + .route( + "/account/ssh_keys/{key_id}", + delete(account::delete_ssh_key), + ) + .route("/account/git_providers", get(account::get_git_providers)) + .route( + "/account/git_providers/connect", + put(account::connect_git_provider), + ) + .route( + "/account/git_providers/disconnect", + put(account::disconnect_git_provider), + ) + .route( + "/account/git_providers/update_scope", + put(account::update_scope), + ) + 
.route("/admin/workspace_hosts", get(admin::get_workspace_hosts)) + .route("/admin/workspace_hosts", post(admin::create_workspace_host)) + .route( + "/admin/workspace_hosts/{workspace_host_id}", + put(admin::update_workspace_host), + ) + .route( + "/admin/workspace_hosts/{workspace_host_id}", + delete(admin::delete_workspace_host), + ) + .route("/admin/license", get(admin::get_license)) + .route("/admin/license", put(admin::update_license)) + .route("/admin/new_license", post(admin::new_license)) + .route("/admin/oauth", get(admin::get_oauth)) + .route("/admin/oauth", put(admin::update_oauth)) + .route("/admin/certs", get(admin::get_certs)) + .route("/admin/certs", put(admin::update_certs)) + .route("/admin/hostnames", put(admin::update_hostnames)) + .route("/admin/cpu_overcommit", get(admin::get_cpu_overcommit)) + .route( + "/admin/cpu_overcommit/{value}", + put(admin::update_cpu_overcommit), + ) + .route( + "/admin/machine_types", + post(machine_type::create_machine_type), + ) + .route( + "/admin/machine_types/{machine_type_id}", + put(machine_type::update_machine_type), + ) + .route( + "/admin/machine_types/{machine_type_id}", + delete(machine_type::delete_machine_type), + ) + .route("/admin/users", get(admin::get_cluster_users)) + .route("/admin/users/{user_id}", put(admin::update_cluster_user)) + .route( + "/admin/organizations/{org_id}/clear", + put(organization::clear_organization), + ) +} + +fn main_routes() -> Router> { + Router::new() + .nest("/v1", v1_api_routes()) + .nest("/private", private_routes()) + .route("/rpc", any(handle_rpc)) +} + +pub fn add_forward_middleware(state: Arc, router: Router) -> Router { + router + .route_layer(middleware::from_fn_with_state( + state.clone(), + forward_middleware, + )) + .layer(ClientIpSource::ConnectInfo.into_extension()) +} + +pub fn build_router(state: Arc) -> Router { + Router::new() + .route("/", any(handle_catch_all)) + .route("/{*wildcard}", any(handle_catch_all)) + .route("/health-check", get(health_check)) 
+ .route( + "/install/lapdev-kube-manager.yaml", + get(kube_manager_install_manifest), + ) + .nest("/api", main_routes()) + .with_state(state) +} + +async fn health_check() {} + +#[derive(Debug, Deserialize)] +struct KubeManagerManifestQuery { + token: String, +} + +async fn kube_manager_install_manifest( + State(state): State>, + Query(query): Query, +) -> Result { + let token = query.token.trim(); + if token.is_empty() { + return Err(ApiError::InvalidRequest( + "token query parameter is required".to_string(), + )); + } + + let ws_base = state.websocket_base_url().await; + let ws_base = ws_base.trim_end_matches('/'); + let cluster_url = format!("{}/api/v1/kube/cluster/rpc", ws_base); + let tunnel_url = format!("{}/api/v1/kube/cluster/tunnel", ws_base); + let image = container_images::kube_manager_image_reference(); + + let manifest = format!( + r#"apiVersion: v1 +kind: Namespace +metadata: + name: lapdev +--- +apiVersion: v1 +kind: Secret +metadata: + name: lapdev-kube-manager + namespace: lapdev +type: Opaque +stringData: + LAPDEV_KUBE_CLUSTER_TOKEN: "{cluster_token}" + LAPDEV_KUBE_CLUSTER_URL: "{cluster_url}" + LAPDEV_KUBE_CLUSTER_TUNNEL_URL: "{tunnel_url}" +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: lapdev-kube-manager + namespace: lapdev +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: lapdev-kube-manager +rules: + - apiGroups: [""] + resources: ["namespaces"] + verbs: ["get", "list", "create", "update", "patch", "delete", "watch"] + - apiGroups: [""] + resources: ["pods", "services", "configmaps", "secrets"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["apps"] + resources: ["deployments", "statefulsets", "daemonsets", "replicasets"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + - apiGroups: ["batch"] + resources: ["jobs", "cronjobs"] + verbs: ["get", "list", 
"watch", "create", "update", "patch", "delete"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: lapdev-kube-manager +subjects: + - kind: ServiceAccount + name: lapdev-kube-manager + namespace: lapdev +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: lapdev-kube-manager +--- +apiVersion: v1 +kind: Service +metadata: + name: lapdev-kube-manager + namespace: lapdev + labels: + app.kubernetes.io/name: lapdev-kube-manager + app.kubernetes.io/component: controller +spec: + selector: + app.kubernetes.io/name: lapdev-kube-manager + ports: + - name: sidecar-rpc + port: 5001 + targetPort: sidecar-rpc + - name: devbox-rpc + port: 7771 + targetPort: devbox-rpc +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: lapdev-kube-manager + namespace: lapdev + labels: + app.kubernetes.io/name: lapdev-kube-manager + app.kubernetes.io/component: controller +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: lapdev-kube-manager + template: + metadata: + labels: + app.kubernetes.io/name: lapdev-kube-manager + app.kubernetes.io/component: controller + spec: + serviceAccountName: lapdev-kube-manager + containers: + - name: manager + image: {image} + imagePullPolicy: Always + env: + - name: LAPDEV_KUBE_CLUSTER_TOKEN + valueFrom: + secretKeyRef: + name: lapdev-kube-manager + key: LAPDEV_KUBE_CLUSTER_TOKEN + - name: LAPDEV_KUBE_CLUSTER_URL + valueFrom: + secretKeyRef: + name: lapdev-kube-manager + key: LAPDEV_KUBE_CLUSTER_URL + - name: LAPDEV_KUBE_CLUSTER_TUNNEL_URL + valueFrom: + secretKeyRef: + name: lapdev-kube-manager + key: LAPDEV_KUBE_CLUSTER_TUNNEL_URL + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + ports: + - containerPort: 5001 + name: sidecar-rpc + - containerPort: 7771 + name: devbox-rpc + resources: + requests: + cpu: "100m" + memory: "256Mi" + limits: + cpu: "1" + memory: "512Mi" +"#, + cluster_token = token, + cluster_url = cluster_url, + 
tunnel_url = tunnel_url, + image = image, + ); + + let headers = [ + (axum::http::header::CONTENT_TYPE, "text/yaml"), + (axum::http::header::CACHE_CONTROL, "no-store"), + ]; + + Ok((StatusCode::OK, headers, manifest).into_response()) +} + +#[debug_handler] +async fn handle_catch_all( + State(state): State>, + // websocket: WebSocketUpgrade, + TypedHeader(cookie): TypedHeader, + req: Request, +) -> Result { + let (parts, _) = req.into_parts(); + let mut parts_clone = parts.clone(); + let websocket = WebSocketUpgrade::from_request_parts(&mut parts_clone, &()) + .await + .ok(); + + let path = parts.uri.path(); + tracing::debug!("handle catch all got {path}"); + + if let Some(websocket) = websocket { + let uri = &parts.uri; + let path = uri.path(); + let query = uri.query(); + let resp = handle_websocket(path, query, websocket, cookie, state).await?; + return Ok(resp); + } + + if let Some(f) = path.strip_prefix("/static/") { + let accept_gzip = parts + .headers + .get("Accept-Encoding") + .and_then(|h| h.to_str().ok()) + .map(|s| s.contains("gzip")) + .unwrap_or(false); + if let Some(file) = if let Some(d) = state.static_dir.as_ref() { + d.get_file(f) + } else { + STATIC_DIR.get_file(f) + } { + let content_type = if f.ends_with(".css") { + Some("text/css") + } else if f.ends_with(".js") { + Some("text/javascript") + } else if f.ends_with(".wasm") { + Some("application/wasm") + } else if f.ends_with(".ico") { + Some("image/vnd.microsoft.icon") + } else { + None + }; + if let Some(content_type) = content_type { + if accept_gzip { + if let Some(file) = if let Some(d) = state.static_dir.as_ref() { + d.get_file(format!("{f}.gz")) + } else { + STATIC_DIR.get_file(format!("{f}.gz")) + } { + return Ok(( + [ + (axum::http::header::CONTENT_TYPE, content_type), + ( + axum::http::header::CACHE_CONTROL, + "public, max-age=31536000", + ), + (axum::http::header::CONTENT_ENCODING, "gzip"), + ], + file.contents(), + ) + .into_response()); + } + } + return Ok(( + [ + 
(axum::http::header::CONTENT_TYPE, content_type), + ( + axum::http::header::CACHE_CONTROL, + "public, max-age=31536000", + ), + ], + file.contents(), + ) + .into_response()); + } + } + } + + if let Some(file) = if let Some(d) = state.static_dir.as_ref() { + d.get_file("index.html") + } else { + STATIC_DIR.get_file("index.html") + } { + return Ok(axum::response::Html::from(file.contents()).into_response()); + } + + Err(ApiError::InvalidRequest("Invalid Request".to_string())) +} + +async fn forward_middleware( + State(state): State>, + Host(hostname): Host, + TypedHeader(cookie): TypedHeader, + req: Request, + next: Next, +) -> Result { + let hostname = hostname + .split(":") + .next() + .ok_or_else(|| ApiError::InternalError("can't split : from hostname".to_string()))?; + let is_forward = state + .conductor + .hostnames + .read() + .await + .values() + .any(|v| v != hostname && hostname.ends_with(v)); + let (parts, body) = req.into_parts(); + let mut parts_clone = parts.clone(); + let websocket = WebSocketUpgrade::from_request_parts(&mut parts_clone, &()) + .await + .ok(); + + let path = parts.uri.path(); + let path_query = parts + .uri + .path_and_query() + .map(|v| v.as_str()) + .unwrap_or(path); + + if !is_forward { + if path == "/ws" || path == "/all_workspaces_ws" { + if let Some(websocket) = websocket { + let uri = &parts.uri; + let path = uri.path(); + let query = uri.query(); + let resp = handle_websocket(path, query, websocket, cookie, state).await?; + return Ok(resp); + } + } + + return Ok(next.run(Request::from_parts(parts, body)).await); + } + + let user = state.authenticate(&cookie).await.ok(); + match lapdev_proxy_http::proxy::forward_workspace(hostname, &state.db, user.as_ref()).await { + Ok((ws, port)) => { + if ws.status != WorkspaceStatus::Running.to_string() { + return Ok( + preview_error_response(format!( + "Preview host `{hostname}` points to environment `{}` which isn’t running right now.", + ws.name + )) + .into_response(), + ); + } + + let 
Some(workspace_host) = state.db.get_workspace_host(ws.host_id).await.ok().flatten() + else { + return Ok(preview_error_response(format!( + "Lapdev couldn’t find a compute host for environment `{}`.", + ws.name + )) + .into_response()); + }; + + let port = port + .map(|p| p.host_port as u16) + .or_else(|| ws.ide_port.map(|p| p as u16)); + if let Some(forward) = lapdev_proxy_http::forward::handler( + &workspace_host.host, + path_query, + websocket, + &parts.headers, + port, + ) + .await + { + match forward { + ProxyForward::Resp(resp) => Ok(resp), + ProxyForward::Proxy(uri) => { + // *req.uri_mut() = uri; + let headers = parts.headers.clone(); + let mut new_req = Request::builder() + .method(parts.method) + .uri(uri) + .body(body) + .unwrap(); + *new_req.headers_mut() = headers; + let resp = state.hyper_client.request(new_req).await?; + Ok(resp.into_response()) + } + } + } else { + Ok(preview_error_response(format!( + "Lapdev couldn’t find anything to serve at `{}{}{}`.", + hostname, + if path_query.starts_with('/') { "" } else { "/" }, + path_query + )) + .into_response()) + } + } + Err(e) => { + let message = match e { + WorkspaceForwardError::WorkspaceNotFound => format!( + "Lapdev couldn’t find any environment for preview host `{hostname}`." + ), + WorkspaceForwardError::PortNotForwarded => format!( + "Preview host `{hostname}` isn’t exposing the requested service port right now. Publish the port from Lapdev and try again." + ), + WorkspaceForwardError::InvalidHostname => format!( + "`{hostname}` isn’t a valid preview host." + ), + WorkspaceForwardError::Unauthorised => format!( + "You don’t have access to preview host `{hostname}`. Sign in with the correct organization." 
+ ), + }; + Ok(preview_error_response(message).into_response()) + } + } +} + +async fn handle_rpc( + State(state): State>, + headers: axum::http::HeaderMap, + body: String, +) -> Result, ApiError> { + let r = state.handle_rpc(&headers, &body).await?; + Ok(Json(r)) +} diff --git a/crates/api/src/server.rs b/crates/api/src/server.rs new file mode 100644 index 0000000..61f2a61 --- /dev/null +++ b/crates/api/src/server.rs @@ -0,0 +1,203 @@ +use std::{env, net::SocketAddr, path::PathBuf, sync::Arc}; + +use anyhow::{anyhow, Context, Result}; +use axum::Router; +use clap::Parser; +use lapdev_conductor::Conductor; +use lapdev_db::api::DbApi; +use tokio::{net::TcpListener, time::Duration}; +use tracing::{error, info, warn}; + +use crate::{ + router::{self, add_forward_middleware}, + state::CoreState, +}; + +pub const LAPDEV_VERSION: &str = env!("CARGO_PKG_VERSION"); + +#[derive(Clone)] +struct LapdevConfig { + db_url: String, + bind_addr: String, + http_port: u16, + ssh_proxy_port: u16, + ssh_proxy_display_port: u16, + force_osuser: Option, + preview_url_proxy_port: u16, +} + +const DB_URL_ENV: &str = "LAPDEV_DB_URL"; +const BIND_ADDR_ENV: &str = "LAPDEV_BIND_ADDR"; +const HTTP_PORT_ENV: &str = "LAPDEV_HTTP_PORT"; +const SSH_PROXY_PORT_ENV: &str = "LAPDEV_SSH_PROXY_PORT"; +const SSH_PROXY_DISPLAY_PORT_ENV: &str = "LAPDEV_SSH_PROXY_DISPLAY_PORT"; +const FORCE_OSUSER_ENV: &str = "LAPDEV_FORCE_OSUSER"; +const PREVIEW_PROXY_PORT_ENV: &str = "LAPDEV_PREVIEW_URL_PROXY_PORT"; + +impl LapdevConfig { + fn from_env() -> Result { + let db_url = env::var(DB_URL_ENV) + .map_err(|_| anyhow!("environment variable {DB_URL_ENV} is required"))?; + let bind_addr = env::var(BIND_ADDR_ENV).unwrap_or_else(|_| "0.0.0.0".to_string()); + let http_port = Self::parse_port(HTTP_PORT_ENV, 8080)?; + let ssh_proxy_port = Self::parse_port(SSH_PROXY_PORT_ENV, 2222)?; + let ssh_proxy_display_port = Self::parse_port(SSH_PROXY_DISPLAY_PORT_ENV, 2222)?; + let preview_url_proxy_port = 
Self::parse_port(PREVIEW_PROXY_PORT_ENV, 8443)?; + + let force_osuser = match env::var(FORCE_OSUSER_ENV) { + Ok(value) if value.trim().is_empty() => None, + Ok(value) => Some(value), + Err(env::VarError::NotPresent) => None, + Err(err) => return Err(anyhow!("error reading {FORCE_OSUSER_ENV}: {err}")), + }; + + Ok(Self { + db_url, + bind_addr, + http_port, + ssh_proxy_port, + ssh_proxy_display_port, + force_osuser, + preview_url_proxy_port, + }) + } + + fn parse_port(var: &str, default: u16) -> Result { + match env::var(var) { + Ok(value) => value + .parse::() + .map_err(|err| anyhow!("{var} must be a valid u16: {err}")), + Err(env::VarError::NotPresent) => Ok(default), + Err(err) => Err(anyhow!("error reading {var}: {err}")), + } + } +} + +#[derive(Parser)] +#[clap(name = "lapdev")] +#[clap(version = env!("CARGO_PKG_VERSION"))] +struct Cli { + /// The folder for putting data + #[clap(short, long, action, value_hint = clap::ValueHint::AnyPath)] + data_folder: Option, +} + +pub struct ApiServer { + config: LapdevConfig, + pub state: Arc, + pub app: Router, + pub conductor: Conductor, +} + +impl ApiServer { + pub async fn new(static_dir: Option>) -> Result { + let cli = Cli::parse(); + let data_folder = cli + .data_folder + .clone() + .unwrap_or_else(|| PathBuf::from("/var/lib/lapdev")); + + init_tracing(); + + let config = LapdevConfig::from_env()?; + let db = DbApi::new(&config.db_url).await?; + let conductor = Conductor::new( + LAPDEV_VERSION, + db.clone(), + data_folder, + config.force_osuser.clone(), + ) + .await?; + + let state = Arc::new( + CoreState::new( + conductor.clone(), + config.ssh_proxy_port, + config.ssh_proxy_display_port, + static_dir, + ) + .await, + ); + + let app = router::build_router(state.clone()); + Ok(Self { + config, + state, + conductor, + app, + }) + } + + pub async fn run(&self) -> Result<()> { + { + let db = self.conductor.db.clone(); + let tunnel_registry = self.state.kube_controller.tunnel_registry.clone(); + let host = 
self.config.bind_addr.clone(); + let port = self.config.preview_url_proxy_port; + tokio::spawn(async move { + loop { + let db_clone = db.clone(); + let tunnel_clone = tunnel_registry.clone(); + let bind = format!("{host}:{port}"); + match lapdev_kube::start_preview_url_proxy_server(db_clone, tunnel_clone, &bind) + .await + { + Ok(_) => { + info!("Preview URL proxy server exited gracefully"); + break; + } + Err(err) => { + error!("Preview URL proxy server failed: {err:?}"); + warn!("Retrying preview URL proxy server in 5 seconds"); + tokio::time::sleep(Duration::from_secs(5)).await; + } + } + } + }); + } + + let app_with_forward = add_forward_middleware(self.state.clone(), self.app.clone()); + + let bind = format!( + "{}:{}", + self.config.bind_addr.clone(), + self.config.http_port + ); + let tcp_listener = TcpListener::bind(&bind) + .await + .with_context(|| format!("bind to {bind}"))?; + + info!(%bind, "Starting Axum HTTP server"); + + if let Err(err) = axum::serve( + tcp_listener, + app_with_forward.into_make_service_with_connect_info::(), + ) + .await + { + tracing::error!("http server stopped error: {err}"); + } + + Ok(()) + } +} + +pub async fn start(static_dir: Option>) { + match ApiServer::new(static_dir).await { + Ok(server) => { + if let Err(e) = server.run().await { + tracing::error!("lapdev api server error: {e:#}"); + } + } + Err(e) => tracing::error!("failed to initialize lapdev api server: {e:#}"), + } +} + +fn init_tracing() { + let var = std::env::var("RUST_LOG").unwrap_or_default(); + let var = format!( + "error,tarpc=warn,tarpc::client=warn,lapdev=info,lapdev_api=info,lapdev_conductor=info,lapdev_rpc=info,lapdev_common=info,lapdev_db=info,lapdev_enterprise=info,lapdev_proxy_ssh=info,lapdev_proxy_http=info,lapdev_kube=info,lapdev_kube_manager=info,{var}" + ); + let filter = tracing_subscriber::EnvFilter::builder().parse_lossy(var); + tracing_subscriber::fmt().with_env_filter(filter).init(); +} diff --git a/lapdev-api/src/session.rs 
b/crates/api/src/session.rs similarity index 88% rename from lapdev-api/src/session.rs rename to crates/api/src/session.rs index 4025e9a..3baedcb 100644 --- a/lapdev-api/src/session.rs +++ b/crates/api/src/session.rs @@ -1,19 +1,18 @@ -use std::{collections::HashMap, net::IpAddr, str::FromStr}; +use std::{collections::HashMap, net::IpAddr, str::FromStr, sync::Arc}; use axum::{ - extract::{Host, Query, State}, + extract::{Query, State}, http::{header::SET_COOKIE, HeaderMap}, response::{IntoResponse, Redirect, Response}, Json, }; -use axum_extra::{headers, TypedHeader}; +use axum_extra::{extract::Host, headers, TypedHeader}; use chrono::Utc; use hyper::StatusCode; use lapdev_common::{ console::NewSessionResponse, AuditAction, AuditResourceKind, AuthProvider, ProviderUser, UserCreationWebhook, }; -use lapdev_db::entities; use lapdev_rpc::error::ApiError; use pasetors::claims::Claims; use sea_orm::{ @@ -23,10 +22,9 @@ use sea_orm::{ use serde::Deserialize; use uuid::Uuid; -use crate::state::{CoreState, RequestInfo, TOKEN_COOKIE_NAME}; +use crate::state::{CoreState, RequestInfo, OAUTH_STATE_COOKIE, TOKEN_COOKIE_NAME}; pub const OAUTH_STATE: &str = "oauth_state"; -pub const OAUTH_STATE_COOKIE: &str = "state"; pub const REDIRECT_URL: &str = "redirect_url"; pub const CONNECT_USER: &str = "connect_user"; pub const READ_REPO: &str = "read_repo"; @@ -98,7 +96,7 @@ pub async fn create_oauth_connection( pub(crate) async fn new_session( Host(hostname): Host, Query(query): Query>, - State(state): State, + State(state): State>, ) -> Result { let (headers, url) = create_oauth_connection(&state, None, false, &hostname, &query).await?; Ok((headers, Json(NewSessionResponse { url })).into_response()) @@ -107,7 +105,7 @@ pub(crate) async fn new_session( pub(crate) async fn session_authorize( Host(hostname): Host, Query(query): Query, - State(state): State, + State(state): State>, info: RequestInfo, TypedHeader(cookie): TypedHeader, ) -> Result { @@ -200,29 +198,32 @@ pub(crate) async fn 
session_authorize( let read_repo: bool = serde_json::from_value(read_repo.to_owned()) .map_err(|_| ApiError::InvalidRequest("invalid read repo".to_string()))?; - match entities::oauth_connection::Entity::find() - .filter(entities::oauth_connection::Column::Provider.eq(query.provider.to_string())) - .filter(entities::oauth_connection::Column::UserId.eq(user_id)) - .filter(entities::oauth_connection::Column::DeletedAt.is_null()) + match lapdev_db_entities::oauth_connection::Entity::find() + .filter( + lapdev_db_entities::oauth_connection::Column::Provider + .eq(query.provider.to_string()), + ) + .filter(lapdev_db_entities::oauth_connection::Column::UserId.eq(user_id)) + .filter(lapdev_db_entities::oauth_connection::Column::DeletedAt.is_null()) .one(&state.db.conn) .await? { Some(c) => { - entities::oauth_connection::ActiveModel { + lapdev_db_entities::oauth_connection::ActiveModel { id: ActiveValue::Set(c.id), provider_login: ActiveValue::Set(provider_user.login), access_token: ActiveValue::Set(token.secret().to_string()), avatar_url: ActiveValue::Set(provider_user.avatar_url.clone()), email: ActiveValue::Set(provider_user.email.clone()), name: ActiveValue::Set(provider_user.name.clone()), - read_repo: ActiveValue::Set(read_repo), + read_repo: ActiveValue::Set(Some(read_repo)), ..Default::default() } .update(&state.db.conn) .await?; } None => { - entities::oauth_connection::ActiveModel { + lapdev_db_entities::oauth_connection::ActiveModel { id: ActiveValue::Set(Uuid::new_v4()), user_id: ActiveValue::Set(user_id), created_at: ActiveValue::Set(now.into()), @@ -234,7 +235,7 @@ pub(crate) async fn session_authorize( avatar_url: ActiveValue::Set(provider_user.avatar_url), email: ActiveValue::Set(provider_user.email), name: ActiveValue::Set(provider_user.name), - read_repo: ActiveValue::Set(read_repo), + read_repo: ActiveValue::Set(Some(read_repo)), } .insert(&state.db.conn) .await?; @@ -244,15 +245,17 @@ pub(crate) async fn session_authorize( return 
Ok(Redirect::temporary(query.next.as_deref().unwrap_or("/")).into_response()); } - let user = match entities::oauth_connection::Entity::find() - .filter(entities::oauth_connection::Column::Provider.eq(query.provider.to_string())) - .filter(entities::oauth_connection::Column::ProviderId.eq(provider_user.id)) - .filter(entities::oauth_connection::Column::DeletedAt.is_null()) + let user = match lapdev_db_entities::oauth_connection::Entity::find() + .filter( + lapdev_db_entities::oauth_connection::Column::Provider.eq(query.provider.to_string()), + ) + .filter(lapdev_db_entities::oauth_connection::Column::ProviderId.eq(provider_user.id)) + .filter(lapdev_db_entities::oauth_connection::Column::DeletedAt.is_null()) .one(&state.db.conn) .await? { Some(conn) => { - let conn = entities::oauth_connection::ActiveModel { + let conn = lapdev_db_entities::oauth_connection::ActiveModel { id: ActiveValue::Set(conn.id), provider_login: ActiveValue::Set(provider_user.login), access_token: ActiveValue::Set(token.secret().to_string()), @@ -264,7 +267,7 @@ pub(crate) async fn session_authorize( .update(&state.db.conn) .await?; - entities::user::ActiveModel { + lapdev_db_entities::user::ActiveModel { id: ActiveValue::Set(conn.user_id), avatar_url: ActiveValue::Set(provider_user.avatar_url), email: ActiveValue::Set(provider_user.email), @@ -285,8 +288,8 @@ pub(crate) async fn session_authorize( .as_ref() { if enterprise.users > 0 { - let count = entities::user::Entity::find() - .filter(entities::user::Column::DeletedAt.is_null()) + let count = lapdev_db_entities::user::Entity::find() + .filter(lapdev_db_entities::user::Column::DeletedAt.is_null()) .count(&state.db.conn) .await?; if count >= enterprise.users as u64 { diff --git a/crates/api/src/state.rs b/crates/api/src/state.rs new file mode 100644 index 0000000..d8e0956 --- /dev/null +++ b/crates/api/src/state.rs @@ -0,0 +1,897 @@ +use std::{ + collections::{BTreeMap, HashMap}, + str::FromStr, + sync::Arc, +}; + +use anyhow::{anyhow, 
Context, Result}; +use axum::{body::Body, extract::FromRequestParts, http::request::Parts, RequestPartsExt}; +use axum_client_ip::ClientIp; +use axum_extra::{ + headers::{self, Cookie, HeaderMapExt, UserAgent}, + TypedHeader, +}; +use chrono::{DateTime, Utc}; +use hyper_util::{client::legacy::connect::HttpConnector, rt::TokioExecutor}; +use lapdev_common::{ + devbox::DirectTunnelConfig, + kube::{AppCatalogStatusEvent, ClusterStatusEvent, EnvironmentWorkloadStatusEvent}, + utils::resolve_api_host, + UserRole, LAPDEV_AUTH_STATE_COOKIE, LAPDEV_AUTH_TOKEN_COOKIE, LAPDEV_BASE_HOSTNAME, +}; +use lapdev_conductor::{scheduler::LAPDEV_CPU_OVERCOMMIT, Conductor}; +use lapdev_db::api::DbApi; +use lapdev_devbox_rpc::PortMapping; +use lapdev_enterprise::license::LAPDEV_ENTERPRISE_LICENSE; +use lapdev_kube_rpc::DevboxRouteConfig; +use lapdev_rpc::error::ApiError; +use pasetors::{ + claims::ClaimsValidationRules, + keys::SymmetricKey, + token::{TrustedToken, UntrustedToken}, + version4::V4, +}; +use sea_orm::{ColumnTrait, EntityTrait, QueryFilter}; +use serde::Deserialize; +use sqlx::postgres::PgNotification; +use tokio::{ + sync::{broadcast, RwLock}, + time::{sleep, Duration}, +}; +use tokio_rustls::rustls::sign::CertifiedKey; +use uuid::Uuid; + +use crate::{ + auth::{Auth, AuthConfig}, + cert::{load_cert, CertStore}, + devbox_tunnels::DevboxTunnelRegistry, + github::GithubClient, + kube_controller::KubeController, +}; + +use crate::environment_events::EnvironmentLifecycleEvent; + +pub const OAUTH_STATE_COOKIE: &str = LAPDEV_AUTH_STATE_COOKIE; +pub const TOKEN_COOKIE_NAME: &str = LAPDEV_AUTH_TOKEN_COOKIE; +pub const LAPDEV_CERTS: &str = "lapdev-certs"; + +pub type HyperClient = hyper_util::client::legacy::Client; + +pub struct RequestInfo { + pub ip: Option, + pub user_agent: Option, +} + +#[derive(Debug, Deserialize)] +struct ConfigUpdatePayload { + name: String, + value: String, +} + +impl FromRequestParts for RequestInfo { + type Rejection = ApiError; + + async fn 
from_request_parts(parts: &mut Parts, _state: &S) -> Result { + let ip = parts + .extract::() + .await + .ok() + .map(|ClientIp(ip)| ip.to_string()); + let user_agent = parts.extract::>().await.ok(); + let user_agent = user_agent.map(|u| u.to_string()); + Ok(Self { user_agent, ip }) + } +} + +/// Temporary storage for CLI authentication tokens during browser login flow +pub struct PendingCliAuth { + pub token: String, + pub expires_at: DateTime, +} + +#[derive(Clone)] +pub struct CoreState { + pub conductor: Conductor, + pub github_client: GithubClient, + pub hyper_client: Arc, + pub db: DbApi, + pub auth: Arc, + pub auth_token_key: Arc>, + pub certs: CertStore, + // actuall ssh proxy port + pub ssh_proxy_port: u16, + // ssh proxy port to display in front end + pub ssh_proxy_display_port: u16, + pub static_dir: Arc>>, + // Kubernetes controller + pub kube_controller: KubeController, + // Devbox tunnel registry for session-level bridging + pub devbox_tunnels: Arc, + // Pending CLI authentication tokens (session_id -> token) + pub pending_cli_auth: Arc>>, + // Active devbox sessions (user_id -> DevboxSessionHandle) + pub active_devbox_sessions: Arc>>>, + // Lifecycle notifications for kube environments + pub environment_events: broadcast::Sender, + pub environment_workload_events: broadcast::Sender, + pub cluster_events: broadcast::Sender, + pub app_catalog_events: broadcast::Sender, +} + +/// Handle for an active devbox session +#[derive(Clone)] +pub struct DevboxSessionHandle { + pub session_id: Uuid, + pub device_name: String, + pub notify_tx: tokio::sync::mpsc::UnboundedSender, + pub rpc_client: lapdev_devbox_rpc::DevboxClientRpcClient, + pub connection_id: Uuid, +} + +/// Notifications that can be sent to active devbox sessions +#[derive(Debug, Clone)] +pub enum DevboxSessionNotification { + Displaced { new_device_name: String }, +} + +impl CoreState { + pub async fn new( + conductor: Conductor, + ssh_proxy_port: u16, + ssh_proxy_display_port: u16, + static_dir: 
Option>, + ) -> Self { + let github_client = GithubClient::new(); + let key = conductor.db.load_api_auth_token_key().await; + let auth = Auth::new(&conductor.db).await; + let certs = load_certs(&conductor.db).await.unwrap_or_default(); + + let hyper_client: HyperClient = + hyper_util::client::legacy::Client::<(), ()>::builder(TokioExecutor::new()) + .build(HttpConnector::new()); + + let db = conductor.db.clone(); + let (environment_events, _) = broadcast::channel(128); + let (environment_workload_events, _) = broadcast::channel(256); + let (cluster_events, _) = broadcast::channel(128); + let (app_catalog_events, _) = broadcast::channel(128); + let state = Self { + db: db.clone(), + conductor, + github_client, + auth: Arc::new(auth), + auth_token_key: Arc::new(key), + certs: Arc::new(std::sync::RwLock::new(Arc::new(certs))), + ssh_proxy_port, + ssh_proxy_display_port, + hyper_client: Arc::new(hyper_client), + static_dir: Arc::new(static_dir), + kube_controller: KubeController::new(db, environment_events.clone()), + devbox_tunnels: Arc::new(DevboxTunnelRegistry::new()), + pending_cli_auth: Arc::new(RwLock::new(HashMap::new())), + active_devbox_sessions: Arc::new(RwLock::new(HashMap::new())), + environment_events, + environment_workload_events: environment_workload_events.clone(), + cluster_events, + app_catalog_events, + }; + + state.spawn_background_tasks(); + + state + } + + fn spawn_background_tasks(&self) { + { + let state = self.clone(); + tokio::spawn(async move { + if let Err(e) = state.monitor_config_updates().await { + tracing::error!("api monitor config updates error: {e}"); + } + }); + } + + { + let state = self.clone(); + tokio::spawn(async move { + state.cleanup_pending_cli_auth_loop().await; + }); + } + + Self::spawn_listener("environment", self.clone(), |s| async move { + s.monitor_environment_events().await + }); + Self::spawn_listener("environment_workload", self.clone(), |s| async move { + s.monitor_environment_workload_events().await + }); + 
Self::spawn_listener("cluster", self.clone(), |s| async move { + s.monitor_cluster_events().await + }); + Self::spawn_listener("app_catalog", self.clone(), |s| async move { + s.monitor_app_catalog_events().await + }); + } + + fn spawn_listener(name: &'static str, state: CoreState, factory: F) + where + F: Fn(CoreState) -> Fut + Send + Sync + 'static, + Fut: std::future::Future> + Send, + { + tokio::spawn(async move { + loop { + match factory(state.clone()).await { + Ok(_) => break, + Err(e) => { + tracing::error!( + listener = name, + error = %e, + "listener error; retrying in 5s" + ); + sleep(Duration::from_secs(5)).await; + } + } + } + }); + } + + async fn monitor_config_updates(&self) -> Result<()> { + let pool = self + .db + .pool + .clone() + .ok_or_else(|| anyhow!("db doesn't have pg pool"))?; + let mut listener = sqlx::postgres::PgListener::connect_with(&pool).await?; + listener.listen("config_update").await?; + loop { + let notification = listener.recv().await?; + if let Err(e) = self.handle_config_update_notification(notification).await { + tracing::error!("handle config update notification error: {e:#}"); + } + } + } + + async fn handle_config_update_notification(&self, notification: PgNotification) -> Result<()> { + let payload: ConfigUpdatePayload = serde_json::from_str(notification.payload()) + .with_context(|| format!("trying to deserialize payload {}", notification.payload()))?; + if payload.name == AuthConfig::GITHUB.client_id + || payload.name == AuthConfig::GITHUB.client_secret + || payload.name == AuthConfig::GITLAB.client_id + || payload.name == AuthConfig::GITLAB.client_secret + { + self.auth.resync(&self.db).await; + } else if payload.name == LAPDEV_ENTERPRISE_LICENSE { + self.conductor.enterprise.license.resync_license().await; + } else if payload.name == LAPDEV_CPU_OVERCOMMIT { + if let Ok(v) = payload.value.parse::() { + *self.conductor.cpu_overcommit.write().await = v.max(1); + } + } else if payload.name == LAPDEV_BASE_HOSTNAME { + 
*self.conductor.hostnames.write().await = self + .conductor + .enterprise + .get_hostnames() + .await + .unwrap_or_default(); + } else if payload.name == LAPDEV_CERTS { + if let Ok(certs) = load_certs(&self.db).await { + if let Ok(mut current) = self.certs.write() { + *current = Arc::new(certs); + } + } + } + Ok(()) + } + + async fn cleanup_pending_cli_auth_loop(&self) { + loop { + tokio::time::sleep(tokio::time::Duration::from_secs(60)).await; + let now = Utc::now(); + let mut pending = self.pending_cli_auth.write().await; + pending.retain(|_, auth| auth.expires_at > now); + let removed = pending.len(); + if removed > 0 { + tracing::debug!("Cleaned up {} expired CLI auth tokens", removed); + } + } + } + + async fn monitor_environment_events(&self) -> Result<()> { + let pool = self + .db + .pool + .clone() + .ok_or_else(|| anyhow!("db doesn't have pg pool"))?; + let mut listener = sqlx::postgres::PgListener::connect_with(&pool).await?; + listener.listen("environment_lifecycle").await?; + tracing::info!("environment lifecycle listener started"); + loop { + let notification = listener.recv().await?; + match serde_json::from_str::(notification.payload()) { + Ok(event) => { + let _ = self.environment_events.send(event); + } + Err(err) => { + tracing::error!( + payload = notification.payload(), + error = ?err, + "failed to deserialize environment lifecycle notification" + ); + } + } + } + } + + async fn monitor_environment_workload_events(&self) -> Result<()> { + let pool = self + .db + .pool + .clone() + .ok_or_else(|| anyhow!("db doesn't have pg pool"))?; + let mut listener = sqlx::postgres::PgListener::connect_with(&pool).await?; + listener.listen("environment_workload_status").await?; + tracing::info!("environment workload listener started"); + loop { + let notification = listener.recv().await?; + match serde_json::from_str::(notification.payload()) { + Ok(event) => { + let _ = self.environment_workload_events.send(event); + } + Err(err) => { + tracing::error!( + 
payload = notification.payload(), + error = ?err, + "failed to deserialize environment workload status notification" + ); + } + } + } + } + + async fn monitor_cluster_events(&self) -> Result<()> { + let pool = self + .db + .pool + .clone() + .ok_or_else(|| anyhow!("db doesn't have pg pool"))?; + let mut listener = sqlx::postgres::PgListener::connect_with(&pool).await?; + listener.listen("cluster_status").await?; + tracing::info!("cluster status listener started"); + loop { + let notification = listener.recv().await?; + match serde_json::from_str::(notification.payload()) { + Ok(event) => { + let _ = self.cluster_events.send(event); + } + Err(err) => { + tracing::error!( + payload = notification.payload(), + error = ?err, + "failed to deserialize cluster status notification" + ); + } + } + } + } + + async fn monitor_app_catalog_events(&self) -> Result<()> { + let pool = self + .db + .pool + .clone() + .ok_or_else(|| anyhow!("db doesn't have pg pool"))?; + let mut listener = sqlx::postgres::PgListener::connect_with(&pool).await?; + listener.listen("app_catalog_status").await?; + tracing::info!("app catalog status listener started"); + loop { + let notification = listener.recv().await?; + match serde_json::from_str::(notification.payload()) { + Ok(event) => { + let _ = self.app_catalog_events.send(event); + } + Err(err) => { + tracing::error!( + payload = notification.payload(), + error = ?err, + "failed to deserialize app catalog status notification" + ); + } + } + } + } + + pub async fn websocket_base_url(&self) -> String { + let host = if let Ok(value) = std::env::var("LAPDEV_API_HOST") { + resolve_api_host(Some(value.as_str())) + } else { + let stored = self + .conductor + .hostnames + .read() + .await + .get("") + .cloned() + .unwrap_or_default(); + let stored_ref = if stored.trim().is_empty() { + None + } else { + Some(stored.as_str()) + }; + resolve_api_host(stored_ref) + }; + + CoreState::normalize_ws_base(&host) + } + + fn normalize_ws_base(candidate: &str) 
-> String { + let trimmed = candidate.trim(); + let with_scheme = if trimmed.starts_with("http://") + || trimmed.starts_with("https://") + || trimmed.starts_with("ws://") + || trimmed.starts_with("wss://") + { + trimmed.to_string() + } else { + format!("https://{}", trimmed) + }; + + if with_scheme.starts_with("http://") { + with_scheme.replacen("http://", "ws://", 1) + } else if with_scheme.starts_with("https://") { + with_scheme.replacen("https://", "wss://", 1) + } else { + with_scheme + } + } + + pub async fn push_devbox_routes(&self, user_id: Uuid, environment_id: Uuid) { + match self + .build_devbox_route_snapshot(user_id, environment_id) + .await + { + Ok((cluster_id, base_environment_id, routes)) => { + if let Err(err) = self + .kube_controller + .set_devbox_routes( + cluster_id, + base_environment_id.unwrap_or(environment_id), + routes, + ) + .await + { + tracing::warn!( + environment_id = %environment_id, + error = %err, + "Failed to push devbox routes to kube-manager" + ); + } + } + Err(err) => { + tracing::warn!( + environment_id = %environment_id, + error = %err, + "Failed to build devbox route snapshot" + ); + } + } + } + + pub async fn push_devbox_route_for_intercept( + &self, + environment: lapdev_db_entities::kube_environment::Model, + intercept: lapdev_db_entities::kube_devbox_workload_intercept::Model, + ) { + let base = self.websocket_base_url().await; + let base_trimmed = base.trim_end_matches('/').to_string(); + + match self + .route_workload_id_for_intercept(&intercept) + .await + .and_then(|route_workload_id| { + Self::build_devbox_route_config_from_intercept( + base_trimmed.as_str(), + &environment, + &intercept, + route_workload_id, + ) + }) { + Ok(route) => { + let (target_environment_id, _) = Self::route_environment_targets(&environment); + if let Err(err) = self + .kube_controller + .set_devbox_route(environment.cluster_id, target_environment_id, route) + .await + { + tracing::warn!( + environment_id = %environment.id, + workload_id = 
%intercept.workload_id, + intercept_id = %intercept.id, + error = %err, + "Failed to push devbox route for intercept" + ); + } + } + Err(err) => { + tracing::warn!( + environment_id = %environment.id, + intercept_id = %intercept.id, + error = %err, + "Failed to build devbox route for intercept" + ); + } + } + } + + pub async fn clear_devbox_route_for_intercept( + &self, + environment: lapdev_db_entities::kube_environment::Model, + intercept: lapdev_db_entities::kube_devbox_workload_intercept::Model, + ) { + let (target_environment_id, branch_environment_id) = + Self::route_environment_targets(&environment); + + let route_workload_id = match self.route_workload_id_for_intercept(&intercept).await { + Ok(id) => id, + Err(err) => { + tracing::warn!( + environment_id = %environment.id, + intercept_id = %intercept.id, + workload_id = %intercept.workload_id, + error = %err, + "Failed to resolve workload for devbox route removal" + ); + return; + } + }; + + if let Err(err) = self + .kube_controller + .remove_devbox_route( + environment.cluster_id, + target_environment_id, + route_workload_id, + branch_environment_id, + ) + .await + { + tracing::warn!( + environment_id = %environment.id, + workload_id = %intercept.workload_id, + intercept_id = %intercept.id, + error = %err, + "Failed to clear devbox route for intercept" + ); + } + } + + pub async fn clear_devbox_routes_for_environment(&self, environment_id: Uuid) { + match self + .db + .get_kube_environment(environment_id) + .await + .map_err(|e| format!("Failed to fetch environment: {e}")) + { + Ok(Some(environment)) => { + if let Err(err) = self + .kube_controller + .clear_devbox_routes( + environment.cluster_id, + environment.base_environment_id.unwrap_or(environment_id), + environment.base_environment_id.map(|_| environment_id), + ) + .await + { + tracing::warn!( + environment_id = %environment_id, + error = %err, + "Failed to clear devbox routes for environment" + ); + } + } + Ok(None) => { + tracing::debug!( + 
environment_id = %environment_id, + "Environment not found while clearing devbox routes" + ); + } + Err(err) => { + tracing::warn!( + environment_id = %environment_id, + error = %err, + "Failed to clear devbox routes for environment" + ); + } + } + } + + async fn build_devbox_route_snapshot( + &self, + user_id: Uuid, + environment_id: Uuid, + ) -> Result<(Uuid, Option, HashMap), String> { + let environment = self + .db + .get_kube_environment(environment_id) + .await + .map_err(|e| format!("Failed to fetch environment: {e}"))? + .ok_or_else(|| "Environment not found".to_string())?; + + let intercepts = self + .db + .get_active_intercepts_for_environment(environment_id) + .await + .map_err(|e| format!("Failed to fetch intercepts: {e}"))?; + + let base = self.websocket_base_url().await; + let base_trimmed = base.trim_end_matches('/'); + + let mut routes: HashMap = HashMap::new(); + + for intercept in intercepts { + if intercept.user_id != user_id { + continue; + } + + let route_workload_id = self.route_workload_id_for_intercept(&intercept).await?; + + let route = Self::build_devbox_route_config_from_intercept( + base_trimmed, + &environment, + &intercept, + route_workload_id, + )?; + + routes.insert(route_workload_id, route); + } + + Ok(( + environment.cluster_id, + environment.base_environment_id, + routes, + )) + } + + fn build_devbox_route_config_from_intercept( + base_trimmed: &str, + environment: &lapdev_db_entities::kube_environment::Model, + intercept: &lapdev_db_entities::kube_devbox_workload_intercept::Model, + route_workload_id: Uuid, + ) -> Result { + let port_mappings = Self::intercept_port_map(intercept)?; + let websocket_url = format!( + "{}/api/v1/kube/sidecar/tunnel/{}/{}", + base_trimmed, environment.id, intercept.workload_id + ); + + Ok(DevboxRouteConfig { + intercept_id: intercept.id, + workload_id: route_workload_id, + auth_token: environment.auth_token.clone(), + websocket_url, + path_pattern: "/*".to_string(), + branch_environment_id: 
environment.base_environment_id.map(|_| environment.id), + created_at_epoch_seconds: Some(intercept.created_at.timestamp()), + expires_at_epoch_seconds: intercept.stopped_at.map(|dt| dt.timestamp()), + port_mappings, + }) + } + + fn intercept_port_map( + intercept: &lapdev_db_entities::kube_devbox_workload_intercept::Model, + ) -> Result, String> { + let value: serde_json::Value = intercept.port_mappings.clone().into(); + let mappings: Vec = serde_json::from_value(value).map_err(|e| { + format!( + "Failed to parse port mappings for intercept {}: {e}", + intercept.id + ) + })?; + + let mut port_map = HashMap::with_capacity(mappings.len()); + for mapping in &mappings { + port_map.insert(mapping.workload_port, mapping.local_port); + } + + Ok(port_map) + } + + async fn route_workload_id_for_intercept( + &self, + intercept: &lapdev_db_entities::kube_devbox_workload_intercept::Model, + ) -> Result { + let workload = self + .db + .get_environment_workload(intercept.workload_id) + .await + .map_err(|e| { + format!( + "Failed to load workload {} for intercept {}: {e}", + intercept.workload_id, intercept.id + ) + })? 
+ .ok_or_else(|| { + format!( + "Workload {} not found for intercept {}", + intercept.workload_id, intercept.id + ) + })?; + + Ok(workload.base_workload_id.unwrap_or(workload.id)) + } + + fn route_environment_targets( + environment: &lapdev_db_entities::kube_environment::Model, + ) -> (Uuid, Option) { + ( + environment.base_environment_id.unwrap_or(environment.id), + environment.base_environment_id.map(|_| environment.id), + ) + } + + fn cookie_token(&self, cookie: &headers::Cookie, name: &str) -> Result { + let token = cookie.get(name).ok_or(ApiError::Unauthenticated)?; + let untrusted_token = + UntrustedToken::try_from(token).map_err(|_| ApiError::Unauthenticated)?; + let token = pasetors::local::decrypt( + &self.auth_token_key, + &untrusted_token, + &ClaimsValidationRules::new(), + None, + None, + ) + .map_err(|_| ApiError::Unauthenticated)?; + Ok(token) + } + + pub fn auth_state_token(&self, cookie: &headers::Cookie) -> Result { + self.cookie_token(cookie, OAUTH_STATE_COOKIE) + } + + pub fn token(&self, cookie: &headers::Cookie) -> Result { + self.cookie_token(cookie, TOKEN_COOKIE_NAME) + } + + pub async fn require_enterprise(&self) -> Result<(), ApiError> { + if self.conductor.enterprise.has_valid_license().await { + return Ok(()); + } + Err(ApiError::EnterpriseInvalid) + } + + async fn authorize_org( + &self, + headers: &axum::http::HeaderMap, + org_id: Uuid, + ) -> Result< + ( + lapdev_db_entities::user::Model, + lapdev_db_entities::organization_member::Model, + ), + ApiError, + > { + let user = self.authenticate_raw(headers).await?; + let member = self + .db + .get_organization_member(user.id, org_id) + .await + .map_err(|_| ApiError::Unauthorized)?; + Ok((user, member)) + } + + pub async fn authorize( + &self, + headers: &axum::http::HeaderMap, + org_id: Uuid, + required_role: Option, + ) -> Result { + let (user, member) = self.authorize_org(headers, org_id).await?; + + if let Some(required_role) = required_role { + let member_role = + 
UserRole::from_str(&member.role).map_err(|_| ApiError::Unauthorized)?; + + match required_role { + UserRole::Owner => { + if member_role != UserRole::Owner { + return Err(ApiError::DetailedUnauthorized( + "Owner role required".to_string(), + )); + } + } + UserRole::Admin => { + if member_role != UserRole::Owner && member_role != UserRole::Admin { + return Err(ApiError::DetailedUnauthorized( + "Admin or Owner role required".to_string(), + )); + } + } + UserRole::Member => { + // Any role is sufficient for Member access + } + } + } + + Ok(user) + } + + pub async fn authenticate_raw( + &self, + headers: &axum::http::HeaderMap, + ) -> Result { + let cookie = headers + .typed_get::() + .ok_or(ApiError::Unauthenticated)?; + self.authenticate(&cookie).await + } + + pub async fn authenticate( + &self, + cookie: &headers::Cookie, + ) -> Result { + let token = self.token(cookie)?; + let user_id: Uuid = token + .payload_claims() + .and_then(|c| c.get_claim("user_id")) + .and_then(|v| serde_json::from_value(v.to_owned()).ok()) + .ok_or(ApiError::Unauthenticated)?; + let user = lapdev_db_entities::user::Entity::find_by_id(user_id) + .filter(lapdev_db_entities::user::Column::DeletedAt.is_null()) + .one(&self.db.conn) + .await? 
+ .ok_or(ApiError::Unauthenticated)?; + Ok(user) + } + + pub async fn authenticate_cluster_admin( + &self, + cookie: &headers::Cookie, + ) -> Result { + let user = self.authenticate(cookie).await?; + if !user.cluster_admin { + return Err(ApiError::Unauthorized); + } + Ok(user) + } + + pub async fn get_project( + &self, + cookie: &headers::Cookie, + org_id: Uuid, + project_id: Uuid, + ) -> Result< + ( + lapdev_db_entities::user::Model, + lapdev_db_entities::project::Model, + ), + ApiError, + > { + let user = self.authenticate(cookie).await?; + self.db + .get_organization_member(user.id, org_id) + .await + .map_err(|_| ApiError::Unauthorized)?; + let project = self + .db + .get_project(project_id) + .await + .map_err(|_| ApiError::InvalidRequest("project doesn't exist".to_string()))?; + if project.organization_id != org_id { + return Err(ApiError::Unauthorized); + } + Ok((user, project)) + } +} + +async fn load_certs(db: &DbApi) -> Result>> { + let certs = db.get_config(LAPDEV_CERTS).await?; + let certs: Vec<(String, String)> = serde_json::from_str(&certs)?; + + let mut final_certs = HashMap::new(); + for (cert, key) in certs { + if let Ok((dns_names, cert)) = load_cert(&cert, &key) { + for dns_name in dns_names { + final_certs.insert(dns_name, Arc::new(cert.clone())); + } + } + } + Ok(final_certs) +} diff --git a/lapdev-api/src/websocket.rs b/crates/api/src/websocket.rs similarity index 84% rename from lapdev-api/src/websocket.rs rename to crates/api/src/websocket.rs index 75d7ffd..a98ea4e 100644 --- a/lapdev-api/src/websocket.rs +++ b/crates/api/src/websocket.rs @@ -1,3 +1,5 @@ +use std::sync::Arc; + use axum::{ extract::{ ws::{Message, WebSocket}, @@ -8,7 +10,6 @@ use axum::{ use axum_extra::headers; use futures_util::StreamExt; use hyper::StatusCode; -use lapdev_db::entities; use lapdev_rpc::error::ApiError; use uuid::Uuid; @@ -19,7 +20,7 @@ pub async fn handle_websocket( query: Option<&str>, websocket: WebSocketUpgrade, cookies: headers::Cookie, - state: 
CoreState, + state: Arc, ) -> Result { if path == "/ws" { let ws_name = query @@ -44,7 +45,6 @@ pub async fn handle_websocket( state.conductor.cleanup_workspace_updates(ws.id).await; }) .into_response(); - println!("websocket finished"); return Ok(resp); } else if path == "/all_workspaces_ws" { let user = state.authenticate(&cookies).await?; @@ -54,7 +54,6 @@ pub async fn handle_websocket( state.conductor.cleanup_all_workspace_updates(user.id).await; }) .into_response(); - println!("websocket finished"); return Ok(resp); } @@ -67,17 +66,17 @@ async fn handle_workspace_updates(ws_id: Uuid, socket: &mut WebSocket, state: &C tokio::select! { incoming = socket.recv() => { if incoming.is_none() { - println!("websocket closed by client"); + tracing::debug!("websocket closed by client"); return; } } msg = rx.next() => { if let Some(msg) = msg { if let Ok(c) = serde_json::to_string(&msg) { - let _ = socket.send(Message::Text(c)).await; + let _ = socket.send(Message::Text(c.into())).await; } } else { - println!("workspace update messages stopped"); + tracing::debug!("workspace update messages stopped"); return; } } @@ -86,7 +85,7 @@ async fn handle_workspace_updates(ws_id: Uuid, socket: &mut WebSocket, state: &C } async fn handle_all_workspaces_update( - user: &entities::user::Model, + user: &lapdev_db_entities::user::Model, socket: &mut WebSocket, state: &CoreState, ) { @@ -95,17 +94,17 @@ async fn handle_all_workspaces_update( tokio::select! 
{ incoming = socket.recv() => { if incoming.is_none() { - println!("websocket closed by client"); + tracing::debug!("websocket closed by client"); return; } } msg = rx.next() => { if let Some(msg) = msg { if let Ok(c) = serde_json::to_string(&msg) { - let _ = socket.send(Message::Text(c)).await; + let _ = socket.send(Message::Text(c.into())).await; } } else { - println!("workspace update messages stopped"); + tracing::debug!("workspace update messages stopped"); return; } } diff --git a/crates/api/src/websocket_socket.rs b/crates/api/src/websocket_socket.rs new file mode 100644 index 0000000..df051d1 --- /dev/null +++ b/crates/api/src/websocket_socket.rs @@ -0,0 +1,73 @@ +use std::{ + io, + pin::Pin, + task::{Context, Poll}, +}; + +use axum::extract::ws::{Message, WebSocket}; +use bytes::Bytes; +use futures::{Sink, Stream}; +use futures_util::StreamExt; +use pin_project::pin_project; + +/// Adapter that exposes an Axum `WebSocket` as a binary sink/stream. +#[pin_project] +pub struct AxumBinarySocket { + #[pin] + inner: WebSocket, +} + +impl AxumBinarySocket { + pub fn new(socket: WebSocket) -> Self { + Self { inner: socket } + } +} + +impl Stream for AxumBinarySocket { + type Item = Result; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let mut this = self.project(); + loop { + match futures::ready!(this.inner.as_mut().poll_next_unpin(cx)) { + Some(Ok(Message::Binary(data))) => return Poll::Ready(Some(Ok(data))), + Some(Ok(Message::Close(_))) => return Poll::Ready(None), + Some(Ok(_)) => continue, + Some(Err(err)) => return Poll::Ready(Some(Err(io::Error::other(err)))), + None => return Poll::Ready(None), + } + } + } +} + +impl Sink for AxumBinarySocket { + type Error = io::Error; + + fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + self.project() + .inner + .poll_ready(cx) + .map_err(io::Error::other) + } + + fn start_send(self: Pin<&mut Self>, item: Bytes) -> Result<(), Self::Error> { + self.project() + .inner + 
.start_send(Message::Binary(item)) + .map_err(io::Error::other) + } + + fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + self.project() + .inner + .poll_flush(cx) + .map_err(io::Error::other) + } + + fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + self.project() + .inner + .poll_close(cx) + .map_err(io::Error::other) + } +} diff --git a/lapdev-api/src/workspace.rs b/crates/api/src/workspace.rs similarity index 94% rename from lapdev-api/src/workspace.rs rename to crates/api/src/workspace.rs index c43df07..a88b028 100644 --- a/lapdev-api/src/workspace.rs +++ b/crates/api/src/workspace.rs @@ -1,4 +1,4 @@ -use std::{cmp::Ordering, collections::HashMap, str::FromStr}; +use std::{cmp::Ordering, collections::HashMap, str::FromStr, sync::Arc}; use anyhow::Result; use axum::{ @@ -14,7 +14,7 @@ use lapdev_common::{ AuditAction, AuditResourceKind, NewWorkspace, RepoBuildResult, UpdateWorkspacePort, WorkspaceInfo, WorkspacePort, WorkspaceService, WorkspaceStatus, WorkspaceUpdateEvent, }; -use lapdev_db::{api::LAPDEV_PIN_UNPIN_ERROR, entities}; +use lapdev_db::api::LAPDEV_PIN_UNPIN_ERROR; use lapdev_rpc::error::ApiError; use sea_orm::{ ActiveModelTrait, ActiveValue, ColumnTrait, EntityTrait, QueryFilter, TransactionTrait, @@ -28,7 +28,7 @@ pub async fn create_workspace( info: RequestInfo, TypedHeader(cookie): TypedHeader, Path(org_id): Path, - State(state): State, + State(state): State>, Json(workspace): Json, ) -> Result { let user = state.authenticate(&cookie).await?; @@ -50,7 +50,7 @@ pub async fn create_workspace( } pub async fn all_workspaces( - State(state): State, + State(state): State>, Path(org_id): Path, TypedHeader(cookie): TypedHeader, ) -> Result>, ApiError> { @@ -121,7 +121,7 @@ pub async fn all_workspaces( pub async fn delete_workspace( TypedHeader(cookie): TypedHeader, Path((org_id, workspace_name)): Path<(Uuid, String)>, - State(state): State, + State(state): State>, info: RequestInfo, ) -> Result { let user = 
state.authenticate(&cookie).await?; @@ -149,7 +149,7 @@ pub async fn delete_workspace( pub async fn get_workspace( TypedHeader(cookie): TypedHeader, Path((org_id, workspace_name)): Path<(Uuid, String)>, - State(state): State, + State(state): State>, ) -> Result, ApiError> { let user = state.authenticate(&cookie).await?; state @@ -167,9 +167,9 @@ pub async fn get_workspace( } let services = if ws.is_compose && ws.compose_parent.is_none() { - entities::workspace::Entity::find() - .filter(entities::workspace::Column::DeletedAt.is_null()) - .filter(entities::workspace::Column::ComposeParent.eq(ws.id)) + lapdev_db_entities::workspace::Entity::find() + .filter(lapdev_db_entities::workspace::Column::DeletedAt.is_null()) + .filter(lapdev_db_entities::workspace::Column::ComposeParent.eq(ws.id)) .all(&state.db.conn) .await? .into_iter() @@ -216,7 +216,7 @@ pub async fn get_workspace( pub async fn start_workspace( TypedHeader(cookie): TypedHeader, Path((org_id, workspace_name)): Path<(Uuid, String)>, - State(state): State, + State(state): State>, info: RequestInfo, ) -> Result { let user = state.authenticate(&cookie).await?; @@ -250,7 +250,7 @@ pub async fn start_workspace( pub async fn stop_workspace( TypedHeader(cookie): TypedHeader, Path((org_id, workspace_name)): Path<(Uuid, String)>, - State(state): State, + State(state): State>, info: RequestInfo, ) -> Result { let user = state.authenticate(&cookie).await?; @@ -284,7 +284,7 @@ pub async fn stop_workspace( pub async fn pin_workspace( TypedHeader(cookie): TypedHeader, Path((org_id, workspace_name)): Path<(Uuid, String)>, - State(state): State, + State(state): State>, info: RequestInfo, ) -> Result { let user = state.authenticate(&cookie).await?; @@ -343,7 +343,7 @@ pub async fn pin_workspace( info.user_agent.clone(), ) .await?; - let ws = entities::workspace::ActiveModel { + let ws = lapdev_db_entities::workspace::ActiveModel { id: ActiveValue::Set(ws.id), pinned: ActiveValue::Set(true), ..Default::default() @@ -368,7 
+368,7 @@ pub async fn pin_workspace( pub async fn unpin_workspace( TypedHeader(cookie): TypedHeader, Path((org_id, workspace_name)): Path<(Uuid, String)>, - State(state): State, + State(state): State>, info: RequestInfo, ) -> Result { let user = state.authenticate(&cookie).await?; @@ -427,7 +427,7 @@ pub async fn unpin_workspace( info.user_agent.clone(), ) .await?; - let ws = entities::workspace::ActiveModel { + let ws = lapdev_db_entities::workspace::ActiveModel { id: ActiveValue::Set(ws.id), pinned: ActiveValue::Set(false), ..Default::default() @@ -452,7 +452,7 @@ pub async fn unpin_workspace( pub async fn rebuild_workspace( TypedHeader(cookie): TypedHeader, Path((org_id, workspace_name)): Path<(Uuid, String)>, - State(state): State, + State(state): State>, info: RequestInfo, ) -> Result { let user = state.authenticate(&cookie).await?; @@ -480,7 +480,7 @@ pub async fn rebuild_workspace( pub async fn workspace_ports( TypedHeader(cookie): TypedHeader, Path((org_id, workspace_name)): Path<(Uuid, String)>, - State(state): State, + State(state): State>, ) -> Result { let user = state.authenticate(&cookie).await?; state @@ -514,7 +514,7 @@ pub async fn workspace_ports( pub async fn update_workspace_port( TypedHeader(cookie): TypedHeader, Path((org_id, workspace_name, port)): Path<(Uuid, String, u16)>, - State(state): State, + State(state): State>, info: RequestInfo, Json(update_workspace_port): Json, ) -> Result { @@ -556,7 +556,7 @@ pub async fn update_workspace_port( info.user_agent.clone(), ) .await?; - entities::workspace_port::ActiveModel { + lapdev_db_entities::workspace_port::ActiveModel { id: ActiveValue::Set(port.id), shared: ActiveValue::Set(update_workspace_port.shared), public: ActiveValue::Set(update_workspace_port.public), diff --git a/crates/cli/Cargo.toml b/crates/cli/Cargo.toml new file mode 100644 index 0000000..3f66c13 --- /dev/null +++ b/crates/cli/Cargo.toml @@ -0,0 +1,48 @@ +[package] +name = "lapdev-cli" +version.workspace = true 
+authors.workspace = true +edition.workspace = true +license.workspace = true +description = "Lapdev CLI - Command line interface for Lapdev" + +[[bin]] +name = "lapdev-cli" +path = "src/main.rs" + +[dependencies] +# Workspace dependencies +clap = { workspace = true, features = ["env"] } +anyhow.workspace = true +thiserror.workspace = true +uuid.workspace = true +chrono.workspace = true +serde.workspace = true +serde_json.workspace = true +reqwest.workspace = true +tokio = { workspace = true, features = ["net", "signal", "io-util", "fs"] } +tracing.workspace = true +tracing-subscriber.workspace = true +tokio-tungstenite.workspace = true +tarpc.workspace = true +lapdev-devbox-rpc.workspace = true +lapdev-rpc.workspace = true +futures.workspace = true +lapdev-tunnel.workspace = true +lapdev-common.workspace = true +quinn.workspace = true + +# CLI-specific dependencies +keyring = { version = "3.6.1", features = ["apple-native", "windows-native", "linux-native"] } +webbrowser = "1.0.2" +colored = "2.1.0" +hostname = "0.4.0" +futures-util = "0.3" +tokio-util = { version = "0.7", features = ["codec"] } +bytes = "1.5" +http = "1.1" +urlencoding = "2.1.3" +tempfile.workspace = true +windows-sys = { version = "0.59.0", features = ["Win32_Storage_FileSystem"] } +tracing-appender.workspace = true +dirs = "6.0.0" diff --git a/crates/cli/src/api/client.rs b/crates/cli/src/api/client.rs new file mode 100644 index 0000000..2bf3f64 --- /dev/null +++ b/crates/cli/src/api/client.rs @@ -0,0 +1,107 @@ +use anyhow::{Context, Result}; +use reqwest::{Client, StatusCode}; +use serde::Deserialize; +use uuid::Uuid; + +pub struct LapdevClient { + client: Client, + base_url: String, + token: Option, +} + +#[derive(Debug, Deserialize)] +pub struct CliTokenResponse { + pub token: String, + pub expires_in: u32, +} + +#[derive(Debug, Deserialize)] +pub struct WhoamiResponse { + pub user_id: Uuid, + pub email: String, + pub name: Option, + pub organization_id: Uuid, + pub device_name: String, + 
pub authenticated_at: Option, + pub expires_at: Option, +} + +impl LapdevClient { + pub fn new(base_url: String) -> Self { + Self { + client: Client::new(), + base_url, + token: None, + } + } + + pub fn with_token(mut self, token: String) -> Self { + self.token = Some(token); + self + } + + /// Poll for CLI token after browser auth + /// Returns Ok(Some(response)) if token is ready + /// Returns Ok(None) if still pending (404) + /// Returns Err for other errors + pub async fn poll_cli_token(&self, session_id: Uuid) -> Result> { + let url = format!( + "{}/api/v1/auth/cli/token?session_id={}", + self.base_url, session_id + ); + + let resp = self + .client + .get(&url) + .send() + .await + .context("Failed to send token poll request")?; + + match resp.status() { + StatusCode::OK => { + let token_resp = resp + .json() + .await + .context("Failed to parse token response")?; + Ok(Some(token_resp)) + } + StatusCode::NOT_FOUND => Ok(None), + status => Err(anyhow::anyhow!( + "Unexpected status code from server: {}", + status + )), + } + } + + /// Get current user info + pub async fn whoami(&self) -> Result { + let token = self + .token + .as_ref() + .context("Not authenticated. 
Please run 'lapdev devbox login' first.")?; + + let resp = self + .client + .get(format!("{}/api/v1/devbox/whoami", self.base_url)) + .header("Authorization", format!("Bearer {}", token)) + .send() + .await + .context("Failed to send whoami request")?; + + if !resp.status().is_success() { + let status = resp.status(); + let text = resp.text().await.unwrap_or_default(); + anyhow::bail!( + "Request failed with status {}: {}", + status, + if text.is_empty() { + "No error message" + } else { + &text + } + ); + } + + resp.json().await.context("Failed to parse whoami response") + } +} diff --git a/crates/cli/src/api/mod.rs b/crates/cli/src/api/mod.rs new file mode 100644 index 0000000..b9babe5 --- /dev/null +++ b/crates/cli/src/api/mod.rs @@ -0,0 +1 @@ +pub mod client; diff --git a/crates/cli/src/auth.rs b/crates/cli/src/auth.rs new file mode 100644 index 0000000..1a0fe88 --- /dev/null +++ b/crates/cli/src/auth.rs @@ -0,0 +1,41 @@ +use anyhow::{Context, Result}; + +const SERVICE_NAME: &str = "dev.lap.cli"; + +/// Store authentication token in OS keychain +/// - macOS: Keychain +/// - Linux: Secret Service (GNOME Keyring / KWallet) +/// - Windows: Credential Manager +pub fn store_token(api_host: &str, token: &str) -> Result<()> { + let entry = + keyring::Entry::new(SERVICE_NAME, api_host).context("Failed to create keychain entry")?; + entry + .set_password(token) + .context("Failed to store token in keychain")?; + Ok(()) +} + +/// Retrieve authentication token from OS keychain +pub fn get_token(api_host: &str) -> Result { + let entry = + keyring::Entry::new(SERVICE_NAME, api_host).context("Failed to create keychain entry")?; + let token = entry + .get_password() + .context("No authentication token found. 
Please run 'lapdev devbox login' first.")?; + Ok(token) +} + +/// Delete authentication token from OS keychain +pub fn delete_token(api_host: &str) -> Result<()> { + let entry = + keyring::Entry::new(SERVICE_NAME, api_host).context("Failed to create keychain entry")?; + entry + .delete_credential() + .context("Failed to delete token from keychain")?; + Ok(()) +} + +/// Check if a token exists in the keychain +pub fn has_token(api_host: &str) -> bool { + get_token(api_host).is_ok() +} diff --git a/crates/cli/src/config.rs b/crates/cli/src/config.rs new file mode 100644 index 0000000..b851eac --- /dev/null +++ b/crates/cli/src/config.rs @@ -0,0 +1,15 @@ +use lapdev_common::utils::resolve_api_host; + +/// Resolve the Lapdev API host and base HTTPS URL. +/// +/// Preference order: +/// 1. CLI argument (`--api-host`) +/// 2. `LAPDEV_API_HOST` environment variable +/// 3. Built-in default (`lapdev_common::LAPDEV_API_HOST`) +pub fn resolve_api_base_url(arg_host: Option) -> (String, String) { + let candidate = arg_host.or_else(|| std::env::var("LAPDEV_API_HOST").ok()); + + let host = resolve_api_host(candidate.as_deref()); + let base_url = format!("https://{}", host); + (host, base_url) +} diff --git a/crates/cli/src/devbox/commands/connect.rs b/crates/cli/src/devbox/commands/connect.rs new file mode 100644 index 0000000..5dc72ab --- /dev/null +++ b/crates/cli/src/devbox/commands/connect.rs @@ -0,0 +1,1344 @@ +use anyhow::{Context, Result}; +use chrono::Utc; +use colored::Colorize; +use futures::StreamExt; +use lapdev_common::devbox::{DirectTunnelConfig, DirectTunnelCredential}; +use lapdev_devbox_rpc::{DevboxClientRpc, DevboxSessionRpcClient}; +use lapdev_rpc::spawn_twoway; +use lapdev_tunnel::direct::DirectEndpoint; +use lapdev_tunnel::{ + run_tunnel_server, websocket_serde_transport, RelayEndpoint, TunnelClient, TunnelError, +}; +use std::{ + collections::{HashMap, HashSet}, + net::SocketAddr, + sync::Arc, + time::Duration, +}; +use tarpc::server::{BaseChannel, 
Channel}; +use tokio::{ + signal, + sync::{mpsc, oneshot, Mutex, RwLock}, + task::JoinHandle, + time::{sleep, timeout, MissedTickBehavior}, +}; +use tokio_tungstenite::tungstenite::client::IntoClientRequest; +use uuid::Uuid; + +use crate::{ + auth, + devbox::dns::{ + is_tcp_protocol, HostsManager, ServiceBridge, ServiceBridgeStartReport, ServiceEndpoint, + SyntheticIpAllocator, + }, +}; + +const DEVBOX_HEARTBEAT_INTERVAL_SECS: u64 = 30; +const DEVBOX_HEARTBEAT_TIMEOUT_SECS: u64 = 10; + +#[derive(Debug)] +enum HeartbeatEvent { + Failed(String), + TimedOut, +} + +/// Execute the devbox connect command +pub async fn execute(api_host: &str) -> Result<()> { + let tunnel_manager = DevboxTunnelManager::new(api_host) + .await + .map_err(|err| anyhow::anyhow!("Failed to initialize Devbox tunnel manager: {}", err))?; + + // Load token from keychain, or prompt for login if not found + let token = match auth::get_token(tunnel_manager.api_host()) { + Ok(token) => token, + Err(_) => { + // No token found, run login flow + println!( + "{}", + "No authentication token found. Starting login...".yellow() + ); + println!(); + + // Run login + super::login::execute(tunnel_manager.api_host(), None).await?; + + // Retrieve the newly stored token + auth::get_token(tunnel_manager.api_host()) + .context("Failed to load authentication token after login")? 
+ } + }; + + // Check hosts file permissions early and warn user + tunnel_manager.check_and_warn_permissions(); + + let mut backoff = Duration::from_secs(1); + let mut is_first_connection = true; + + { + let tunnel_manager = tunnel_manager.clone(); + loop { + match tunnel_manager + .connect_and_run_rpc(&token, is_first_connection) + .await + { + Ok(should_exit) => { + if should_exit { + // Clean exit (Ctrl+C or session displaced) + break; + } + // Otherwise fall through to retry + tracing::info!("RPC connection closed, will retry..."); + } + Err(e) => { + if is_first_connection { + // On first connection, fail fast for auth errors + return Err(e); + } + eprintln!("{} Connection error: {}", "⚠".yellow(), e); + } + } + + is_first_connection = false; + + // Wait before reconnecting + println!( + "{} Reconnecting in {} seconds...", + "🔄".cyan(), + backoff.as_secs() + ); + tokio::select! { + _ = signal::ctrl_c() => { + println!("\n{}", "Received Ctrl+C, disconnecting...".yellow()); + break; + } + _ = sleep(backoff) => { + backoff = (backoff.saturating_mul(2)).min(Duration::from_secs(30)); + } + } + } + } + + tunnel_manager.cleanup_dns().await; + tunnel_manager.shutdown().await; + + Ok(()) +} + +/// RPC server implementation for the CLI +/// This handles incoming calls from the server +#[derive(Clone)] +struct DevboxClientRpcServer { + shutdown_tx: mpsc::UnboundedSender, + env_change_tx: mpsc::UnboundedSender>, + tunnel_manager: DevboxTunnelManager, +} + +impl DevboxClientRpcServer { + fn new( + shutdown_tx: mpsc::UnboundedSender, + env_change_tx: mpsc::UnboundedSender>, + tunnel_manager: DevboxTunnelManager, + ) -> Self { + Self { + shutdown_tx, + env_change_tx, + tunnel_manager, + } + } +} + +impl DevboxClientRpc for DevboxClientRpcServer { + async fn session_displaced(self, _context: tarpc::context::Context, new_device_name: String) { + let shutdown_tx = self.shutdown_tx; + + let _ = shutdown_tx.send(ShutdownSignal::Displaced(new_device_name.clone())); + + 
tracing::warn!("Session displaced by: {}", new_device_name); + } + + async fn environment_changed( + self, + _context: tarpc::context::Context, + environment: Option, + ) { + tracing::info!("environment_changed RPC handler called"); + let env_change_tx = self.env_change_tx; + + if let Some(ref env) = environment { + println!( + "\n{} Environment changed to: {} ({} / {})", + "🔄".cyan(), + env.environment_name.bright_white(), + env.cluster_name.cyan(), + env.namespace.cyan() + ); + tracing::info!( + "Active environment changed to: {} ({})", + env.environment_id, + env.namespace + ); + } else { + println!("\n{} Active environment cleared", "🔄".cyan()); + tracing::info!("Active environment cleared"); + } + + match env_change_tx.send(environment.clone()) { + Ok(_) => { + tracing::info!("Successfully sent environment change to main loop"); + } + Err(e) => { + tracing::error!("Failed to send environment change to main loop: {:?}", e); + } + } + } + + async fn ping(self, _context: tarpc::context::Context) -> Result<(), String> { + tracing::trace!("Received ping"); + Ok(()) + } + + async fn request_direct_config( + self, + _context: tarpc::context::Context, + user_id: Uuid, + stun_observed_addr: Option, + ) -> Result, String> { + self.tunnel_manager + .build_direct_channel_config(user_id, stun_observed_addr) + .await + } +} + +enum ShutdownSignal { + Displaced(String), +} + +#[derive(Clone)] +struct DevboxTunnelManager { + api_host: Arc, + ws_base: Arc, + intercept_task: Arc>>, + client_task: Arc>>, + active_environment: Arc>>, + direct_endpoint: Arc, + /// Shared tunnel client for DNS service bridge + tunnel_client: Arc>>>, + /// Service bridge for DNS resolution + service_bridge: Arc, + /// Hosts file manager + hosts_manager: Arc, + /// IP allocator + ip_allocator: Arc>, + client_token: Arc>>, + client_user_id: Arc>>, +} + +impl DevboxTunnelManager { + async fn new(api_host: impl Into) -> Result { + let api_host = api_host.into(); + let api_host = 
api_host.trim().trim_end_matches('/').to_string(); + let ws_base = format!("wss://{}", api_host); + let tunnel_client = Arc::new(RwLock::new(None)); + let direct_endpoint = Arc::new(DirectEndpoint::bind().await?); + + { + let direct_endpoint = direct_endpoint.clone(); + tokio::spawn(async move { + direct_endpoint.start_tunnel_server().await; + }); + } + + Ok(Self { + api_host: Arc::new(api_host), + ws_base: Arc::new(ws_base), + intercept_task: Arc::new(Mutex::new(None)), + client_task: Arc::new(Mutex::new(None)), + active_environment: Arc::new(Mutex::new(None)), + direct_endpoint, + tunnel_client: tunnel_client.clone(), + service_bridge: Arc::new(ServiceBridge::new(tunnel_client)), + hosts_manager: Arc::new(HostsManager::new()), + ip_allocator: Arc::new(Mutex::new(SyntheticIpAllocator::new())), + client_token: Arc::new(RwLock::new(None)), + client_user_id: Arc::new(RwLock::new(None)), + }) + } + + fn api_host(&self) -> &str { + self.api_host.as_str() + } + + fn ws_base(&self) -> &str { + self.ws_base.as_str() + } + + async fn connect_and_run_rpc(&self, token: &str, is_first_connection: bool) -> Result { + if is_first_connection { + println!("{}", "🔌 Connecting to Lapdev devbox...".cyan()); + } else { + println!("{}", "🔌 Reconnecting to Lapdev devbox...".cyan()); + } + + // Construct WebSocket URL + let ws_url = format!("{}/api/v1/kube/devbox/rpc", self.ws_base()); + + tracing::info!("Connecting to: {}", ws_url); + + // Create WebSocket request with authentication + let mut request = ws_url + .into_client_request() + .context("Failed to create WebSocket request")?; + + // Add authentication header + let headers = request.headers_mut(); + headers.insert( + "Authorization", + format!("Bearer {}", token) + .parse() + .context("Failed to parse authorization header")?, + ); + + // TODO: Add client version header + // headers.insert("X-Lapdev-Client-Version", env!("CARGO_PKG_VERSION").parse()?); + + // Connect WebSocket + let (stream, _response) = 
tokio_tungstenite::connect_async(request) + .await + .context("Failed to establish WebSocket connection")?; + + println!("{}", "✓ WebSocket connection established".green()); + + // Create transport layer + // Set up bidirectional RPC over the websocket stream + let transport = + websocket_serde_transport(stream, tarpc::tokio_serde::formats::Bincode::default()); + let (server_chan, client_chan, _abort_handle) = spawn_twoway(transport); + + // Create channels for this connection + let (shutdown_tx, mut shutdown_rx) = mpsc::unbounded_channel::(); + let (env_change_tx, mut env_change_rx) = + mpsc::unbounded_channel::>(); + + // Create RPC client (for calling server methods) + let rpc_client = + DevboxSessionRpcClient::new(tarpc::client::Config::default(), client_chan).spawn(); + + // Call whoami to get session info + let session_info = match rpc_client + .whoami(tarpc::context::current()) + .await + .context("RPC call failed")? + { + Ok(info) => { + println!( + "{} Connected as {} ({})", + "✓".green(), + info.email.bright_white().bold(), + info.device_name.cyan() + ); + println!( + " Session expires: {}", + info.expires_at + .format("%Y-%m-%d %H:%M:%S UTC") + .to_string() + .dimmed() + ); + info + } + Err(e) => { + eprintln!("{} Failed to get session info: {}", "✗".red(), e); + return Err(anyhow::anyhow!("Authentication failed: {}", e)); + } + }; + + // Create RPC server (for server to call us) + let client_rpc_server = + DevboxClientRpcServer::new(shutdown_tx.clone(), env_change_tx.clone(), self.clone()); + + // Spawn the RPC server task + let server_task = tokio::spawn(async move { + tracing::info!("Starting DevboxClientRpc server..."); + BaseChannel::with_defaults(server_chan) + .execute(client_rpc_server.serve()) + .for_each(|resp| async move { + tokio::spawn(resp); + }) + .await; + tracing::info!("DevboxClientRpc server stopped"); + }); + + self.ensure_client(&rpc_client, token, session_info.user_id) + .await + .map_err(|e| anyhow::anyhow!("Failed to start client 
tunnel: {}", e))?; + + if let Err(err) = self + .ensure_intercept(&rpc_client, token, session_info.user_id) + .await + { + tracing::error!( + user_id = %session_info.user_id, + "Failed to start intercept tunnel: {}", + err + ); + } + + { + let rpc_client = rpc_client.clone(); + let env_change_tx = env_change_tx.clone(); + tokio::spawn(async move { + let result = rpc_client + .get_active_environment(tarpc::context::current()) + .await; + match result { + Ok(Ok(env)) => { + if env_change_tx.send(env).is_err() { + tracing::debug!( + "Failed to deliver initial active environment; receiver dropped" + ); + } + } + Ok(Err(err)) => { + eprintln!( + "{} Failed to fetch active environment: {}", + "⚠".yellow(), + err + ); + } + Err(err) => { + eprintln!( + "{} Failed to fetch active environment: {}", + "⚠".yellow(), + err + ); + } + } + }); + } + + if is_first_connection { + println!("{}", "\n✓ Connected and ready".green()); + println!("{}", " Press Ctrl+C to disconnect".dimmed()); + } else { + println!("{}", "✓ Reconnected successfully".green()); + } + + let mut server_task = server_task; + let mut should_exit = false; + let (heartbeat_event_tx, mut heartbeat_event_rx) = + mpsc::unbounded_channel::(); + let (mut heartbeat_task, heartbeat_cancel_tx) = Self::spawn_heartbeat_task( + rpc_client.clone(), + session_info.user_id, + heartbeat_event_tx, + ); + + loop { + tokio::select! 
{ + heartbeat_event = heartbeat_event_rx.recv() => { + self.handle_heartbeat_event(session_info.user_id, heartbeat_event); + break; + } + res = &mut server_task => { + self.handle_server_task_result(res); + break; + } + _ = signal::ctrl_c() => { + println!("\n{}", "Received Ctrl+C, disconnecting...".yellow()); + should_exit = true; + break; + } + maybe_signal = shutdown_rx.recv() => { + if self.handle_shutdown_signal(maybe_signal) { + should_exit = true; + } + break; + } + notification = env_change_rx.recv() => { + match notification { + Some(env) => { + tracing::info!("Event loop received environment change notification"); + self + .handle_environment_notification(env, &rpc_client, &session_info) + .await; + } + None => { + tracing::warn!("Environment change channel closed unexpectedly"); + break; + } + } + } + } + } + + if !server_task.is_finished() { + server_task.abort(); + } + if !heartbeat_task.is_finished() { + let _ = heartbeat_cancel_tx.send(()); + } + if let Err(err) = heartbeat_task.await { + tracing::warn!("Heartbeat task join error: {}", err); + } + + // Return true if we should exit completely, false to retry connection + Ok(should_exit) + } + + fn handle_heartbeat_event(&self, user_id: Uuid, event: Option) { + match event { + Some(HeartbeatEvent::Failed(err)) => { + tracing::warn!( + user_id = %user_id, + error = %err, + "Devbox session heartbeat failed" + ); + eprintln!( + "{} Connection heartbeat lost ({}), reconnecting...", + "⚠".yellow(), + err + ); + } + Some(HeartbeatEvent::TimedOut) => { + tracing::warn!( + user_id = %user_id, + timeout_secs = DEVBOX_HEARTBEAT_TIMEOUT_SECS, + "Devbox session heartbeat timed out" + ); + eprintln!( + "{} Connection heartbeat timed out, reconnecting...", + "⚠".yellow() + ); + } + None => { + tracing::warn!( + user_id = %user_id, + "Heartbeat channel closed unexpectedly" + ); + eprintln!("{} Heartbeat channel closed, reconnecting...", "⚠".yellow()); + } + } + } + + fn handle_server_task_result(&self, result: 
Result<(), tokio::task::JoinError>) { + match result { + Ok(()) => { + tracing::info!("RPC server task completed normally"); + } + Err(err) => { + tracing::warn!("RPC server task failed: {}", err); + } + } + } + + fn handle_shutdown_signal(&self, signal: Option) -> bool { + match signal { + Some(ShutdownSignal::Displaced(device)) => { + println!( + "\n{} Session displaced by new login from: {}", + "⚠".yellow(), + device.bright_white() + ); + true + } + None => { + tracing::warn!("Shutdown channel closed unexpectedly"); + false + } + } + } + + async fn handle_environment_notification( + &self, + environment: Option, + rpc_client: &DevboxSessionRpcClient, + session_info: &lapdev_devbox_rpc::DevboxSessionInfo, + ) { + { + *self.active_environment.lock().await = environment.as_ref().map(|e| e.environment_id); + } + + match environment { + Some(env) => { + self.process_active_environment(env, rpc_client, session_info) + .await; + } + None => { + self.process_environment_cleared().await; + } + } + } + + async fn process_active_environment( + &self, + env: lapdev_devbox_rpc::DevboxEnvironmentInfo, + rpc_client: &DevboxSessionRpcClient, + session_info: &lapdev_devbox_rpc::DevboxSessionInfo, + ) { + println!( + " Active environment: {} ({} / {})", + env.environment_name.bright_white(), + env.cluster_name.cyan(), + env.namespace.cyan() + ); + + self.print_device_intercepts(env.environment_id, rpc_client, session_info) + .await; + + tracing::info!("Setting up DNS for environment {}", env.environment_id); + match self + .setup_dns_for_environment(env.environment_id, rpc_client) + .await + { + Ok(()) => tracing::info!("DNS setup completed successfully"), + Err(err) => eprintln!("{} Failed to refresh DNS: {}", "⚠".yellow(), err), + } + + match self.restart_client_tunnel(rpc_client).await { + Ok(()) => { + tracing::info!( + environment_id = %env.environment_id, + "Client tunnel restarted after environment change" + ); + } + Err(err) => { + tracing::warn!( + environment_id = 
%env.environment_id, + error = %err, + "Failed to restart client tunnel after environment change" + ); + } + } + } + + async fn process_environment_cleared(&self) { + println!("{} No active environment selected", "ℹ".blue()); + tracing::info!("Clearing DNS entries"); + println!("{} Clearing DNS entries...", "🔧".cyan()); + self.cleanup_dns().await; + } + + async fn print_device_intercepts( + &self, + env_id: Uuid, + rpc_client: &DevboxSessionRpcClient, + session_info: &lapdev_devbox_rpc::DevboxSessionInfo, + ) { + match rpc_client + .list_workload_intercepts(tarpc::context::current(), env_id) + .await + { + Ok(Ok(intercepts)) => { + for intercept in intercepts { + if intercept.device_name != session_info.device_name { + continue; + } + let port_details = if intercept.port_mappings.is_empty() { + "no port mappings".to_string() + } else { + intercept + .port_mappings + .iter() + .map(|mapping| { + format!( + "{} {} -> localhost:{}", + mapping.protocol.to_uppercase(), + mapping.workload_port, + mapping.local_port + ) + }) + .collect::>() + .join(", ") + }; + println!( + " {} Intercept active for {}/{} ({})", + "↻".green(), + intercept.namespace.bright_white(), + intercept.workload_name.cyan(), + port_details + ); + } + } + Ok(Err(err)) => { + eprintln!("{} Failed to list intercepts: {}", "⚠".yellow(), err); + } + Err(err) => { + eprintln!("{} Failed to list intercepts: {}", "⚠".yellow(), err); + } + } + } + + /// Register a newly connected tunnel client and update shared state + async fn set_tunnel_client(&self, client: Arc) { + { + let mut guard = self.tunnel_client.write().await; + *guard = Some(Arc::clone(&client)); + } + } + + /// Clear the stored tunnel client if it matches the provided instance + async fn clear_tunnel_client(&self, client: &Arc) { + let mut guard = self.tunnel_client.write().await; + if guard + .as_ref() + .map_or(false, |current| Arc::ptr_eq(current, client)) + { + *guard = None; + } + } + + /// Check hosts file permissions and warn user if 
insufficient + fn check_and_warn_permissions(&self) { + if !self.hosts_manager.check_permissions() { + eprintln!(); + eprintln!( + "{} {}", + "⚠".yellow(), + "Insufficient permissions to modify hosts file" + .yellow() + .bold() + ); + eprintln!( + " Service DNS resolution will not work without write access to the hosts file." + ); + eprintln!(); + if cfg!(unix) { + eprintln!(" To fix this, run the command with sudo:"); + eprintln!(" {}", "sudo -E lapdev devbox connect".bright_white()); + } else if cfg!(windows) { + eprintln!(" To fix this, run the command as Administrator:"); + eprintln!(" Right-click the terminal and select 'Run as Administrator'"); + } + eprintln!(); + eprintln!( + " Alternatively, you can manually add entries to the hosts file when prompted." + ); + eprintln!(); + } + } + + /// Set up DNS for an environment by fetching services and configuring hosts/bridge + async fn setup_dns_for_environment( + &self, + env_id: Uuid, + rpc_client: &DevboxSessionRpcClient, + ) -> Result<()> { + println!("{} Setting up DNS for services...", "🔧".cyan()); + + // Check hosts file permissions upfront + if !self.hosts_manager.check_permissions() { + eprintln!("{} No write permission for hosts file", "⚠".yellow()); + eprintln!( + " DNS will not work until you grant permissions or manually update the hosts file." + ); + if cfg!(unix) { + eprintln!(" Try running with: sudo -E lapdev devbox connect"); + } else if cfg!(windows) { + eprintln!(" Try running as Administrator"); + } + eprintln!(); + } + + // Fetch services for the environment + let services = match rpc_client + .list_services(tarpc::context::current(), env_id) + .await + .context("RPC call failed")? 
+ { + Ok(services) => services, + Err(e) => { + eprintln!("{} Failed to fetch services: {}", "⚠".yellow(), e); + return Ok(()); + } + }; + + if services.is_empty() { + println!("{} No services found in environment", "ℹ".blue()); + return Ok(()); + } + + // Allocate synthetic IPs and create endpoints + let mut bridge_endpoints = Vec::new(); + let mut hosts_endpoints = Vec::new(); + let mut seen_services = HashSet::new(); + let mut skipped_services = HashSet::new(); + let mut skipped_endpoints = 0usize; + let mut skipped_non_tcp: HashMap> = HashMap::new(); + let mut allocator = self.ip_allocator.lock().await; + allocator.clear(); + + for service in &services { + for port in &service.ports { + if !is_tcp_protocol(&port.protocol) { + skipped_non_tcp + .entry(format!("{}.{}", service.name, service.namespace)) + .or_default() + .push((port.port, port.protocol.clone())); + continue; + } + + let service_key = format!("{}.{}", service.name, service.namespace); + if let Some(ip) = allocator.allocate(&service.name, &service.namespace) { + let endpoint = + ServiceEndpoint::new(service, port.port, port.protocol.clone(), ip); + + if seen_services.insert(service_key) { + hosts_endpoints.push(endpoint.clone()); + } + + bridge_endpoints.push(endpoint); + } else { + skipped_endpoints += 1; + if skipped_services.insert(service_key.clone()) { + tracing::warn!( + service = %service.name, + namespace = %service.namespace, + "Synthetic IP pool exhausted; skipping service" + ); + } + } + } + } + drop(allocator); + + if !skipped_non_tcp.is_empty() { + let skipped_port_count: usize = skipped_non_tcp.values().map(|ports| ports.len()).sum(); + let service_count = skipped_non_tcp.len(); + eprintln!( + "{} Skipped {} non-TCP port(s) across {} service(s); UDP forwarding is not supported.", + "ℹ".blue(), + skipped_port_count, + service_count + ); + for (service, ports) in skipped_non_tcp.iter() { + let formatted_ports: Vec = ports + .iter() + .map(|(port, protocol)| format!("{}/{}", port, 
protocol.to_uppercase())) + .collect(); + eprintln!(" {}: {}", service, formatted_ports.join(", ")); + } + } + + println!( + " {} service endpoint(s) allocated ({} host entry/entries)", + bridge_endpoints.len(), + hosts_endpoints.len() + ); + + if !skipped_services.is_empty() { + eprintln!( + "{} Synthetic IP pool exhausted. Skipped {} endpoint(s) across {} service(s).", + "⚠".yellow(), + skipped_endpoints, + skipped_services.len() + ); + eprintln!( + " Active connections may be incomplete. Please try reducing DNS scope or filing a support ticket." + ); + } + + let bridge_report: ServiceBridgeStartReport = + self.service_bridge.start(bridge_endpoints).await; + + if !bridge_report.failed.is_empty() { + eprintln!( + "{} Failed to bind {} endpoint(s). These services will remain unresolved locally:", + "⚠".yellow(), + bridge_report.failed.len() + ); + for (endpoint, err) in &bridge_report.failed { + eprintln!( + " {}/{}:{} ({}) - {}", + endpoint.namespace, + endpoint.service_name, + endpoint.port, + endpoint.protocol, + err + ); + } + } + + if !bridge_report.started.is_empty() { + println!( + "{} Service bridge started ({} endpoint(s) active)", + "✓".green(), + bridge_report.started.len() + ); + } else { + println!( + "{} Service bridge not running; no endpoints could be activated", + "⚠".yellow() + ); + } + + let active_service_keys: HashSet = bridge_report + .started + .iter() + .map(|endpoint| format!("{}.{}", endpoint.service_name, endpoint.namespace)) + .collect(); + + let mut effective_hosts = hosts_endpoints; + effective_hosts.retain(|endpoint| { + let key = format!("{}.{}", endpoint.service_name, endpoint.namespace); + active_service_keys.contains(&key) + }); + + if effective_hosts.len() < active_service_keys.len() { + tracing::warn!( + filtered_services = active_service_keys.len() - effective_hosts.len(), + "Filtered out host entries for services without active listeners" + ); + } + + // Update hosts file to reflect active listeners only + match 
self.hosts_manager.write_entries(&effective_hosts).await { + Ok(()) => { + if effective_hosts.is_empty() { + println!( + "{} No hosts entries written (no active listeners)", + "ℹ".blue() + ); + } else { + println!( + "{} Hosts file updated with {} service entr{}", + "✓".green(), + effective_hosts.len(), + if effective_hosts.len() == 1 { + "y" + } else { + "ies" + } + ); + } + } + Err(e) => { + eprintln!("{} Failed to update hosts file: {}", "⚠".yellow(), e); + if !effective_hosts.is_empty() { + self.hosts_manager + .print_manual_instructions(&effective_hosts); + } + } + } + + Ok(()) + } + + /// Cleanup DNS entries + async fn cleanup_dns(&self) { + if let Err(e) = self.hosts_manager.remove_entries().await { + tracing::warn!("Failed to clean up hosts file: {}", e); + } + self.service_bridge.stop().await; + self.ip_allocator.lock().await.clear(); + } + + async fn send_session_heartbeat(rpc_client: &DevboxSessionRpcClient) -> Result<(), String> { + match rpc_client.heartbeat(tarpc::context::current()).await { + Ok(Ok(())) => Ok(()), + Ok(Err(err)) => Err(err), + Err(err) => Err(err.to_string()), + } + } + + fn spawn_heartbeat_task( + rpc_client: DevboxSessionRpcClient, + user_id: Uuid, + event_tx: mpsc::UnboundedSender, + ) -> (JoinHandle<()>, oneshot::Sender<()>) { + let (cancel_tx, cancel_rx) = oneshot::channel(); + let task = tokio::spawn(async move { + Self::heartbeat_loop(rpc_client, user_id, event_tx, cancel_rx).await; + }); + (task, cancel_tx) + } + + async fn heartbeat_loop( + rpc_client: DevboxSessionRpcClient, + user_id: Uuid, + event_tx: mpsc::UnboundedSender, + mut cancel_rx: oneshot::Receiver<()>, + ) { + let mut heartbeat_interval = + tokio::time::interval(Duration::from_secs(DEVBOX_HEARTBEAT_INTERVAL_SECS)); + heartbeat_interval.set_missed_tick_behavior(MissedTickBehavior::Skip); + + loop { + tokio::select! 
{ + _ = &mut cancel_rx => { + tracing::debug!(user_id = %user_id, "Heartbeat loop cancelled"); + break; + } + _ = heartbeat_interval.tick() => { + let heartbeat_result = timeout( + Duration::from_secs(DEVBOX_HEARTBEAT_TIMEOUT_SECS), + Self::send_session_heartbeat(&rpc_client) + ) + .await; + + match heartbeat_result { + Ok(Ok(())) => {} + Ok(Err(err)) => { + let _ = event_tx.send(HeartbeatEvent::Failed(err)); + break; + } + Err(_) => { + let _ = event_tx.send(HeartbeatEvent::TimedOut); + break; + } + } + } + } + } + } +} + +struct TunnelTask { + kind: TunnelKind, + intercept_id: Option, + shutdown: oneshot::Sender<()>, + handle: JoinHandle<()>, +} + +#[derive(Copy, Clone)] +enum TunnelKind { + Intercept, + Client, +} + +impl TunnelKind { + fn path(self) -> &'static str { + match self { + TunnelKind::Intercept => "intercept", + TunnelKind::Client => "client", + } + } + + fn as_str(self) -> &'static str { + self.path() + } +} + +impl DevboxTunnelManager { + async fn ensure_intercept( + &self, + rpc_client: &DevboxSessionRpcClient, + token: &str, + user_id: Uuid, + ) -> Result<(), String> { + let mut guard = self.intercept_task.lock().await; + if guard.is_some() { + return Ok(()); + } + + let task = self.spawn_tunnel_task(rpc_client, TunnelKind::Intercept, token, user_id, None); + + *guard = Some(task); + Ok(()) + } + + async fn ensure_client( + &self, + rpc_client: &DevboxSessionRpcClient, + token: &str, + user_id: Uuid, + ) -> Result<(), String> { + let mut guard = self.client_task.lock().await; + if guard.is_some() { + return Ok(()); + } + + { + let mut stored_token = self.client_token.write().await; + *stored_token = Some(token.to_string()); + } + + { + let mut stored_user = self.client_user_id.write().await; + *stored_user = Some(user_id); + } + + let task = self.spawn_tunnel_task(rpc_client, TunnelKind::Client, token, user_id, None); + *guard = Some(task); + Ok(()) + } + + async fn stop_client(&self) { + if let Some(task) = self.client_task.lock().await.take() { 
+ Self::stop_task(task, "Devbox client tunnel").await; + } + } + + async fn restart_client_tunnel( + &self, + rpc_client: &DevboxSessionRpcClient, + ) -> Result<(), String> { + self.stop_client().await; + let token = { self.client_token.read().await.clone() }; + let user_id = { self.client_user_id.read().await.clone() }; + match (token, user_id) { + (Some(token), Some(user_id)) => self.ensure_client(rpc_client, &token, user_id).await, + _ => Ok(()), + } + } + + async fn build_direct_channel_config( + &self, + user_id: Uuid, + stun_observed_addr: Option, + ) -> Result, String> { + let active_user = { self.client_user_id.read().await.clone() }; + let Some(current_user) = active_user else { + return Err("No active devbox client session available".to_string()); + }; + + if current_user != user_id { + return Err("Requesting user does not match active session".to_string()); + } + + let Some(server_observed_addr) = self.direct_endpoint.observed_addr() else { + tracing::warn!("Direct endpoint has no STUN observed address; cannot provide config"); + return Ok(None); + }; + + let token = Uuid::new_v4().to_string(); + let expires_at = Utc::now() + chrono::Duration::minutes(2); + self.direct_endpoint + .credential() + .insert(token.clone(), expires_at) + .await; + + let config = DirectTunnelConfig { + credential: DirectTunnelCredential { token, expires_at }, + server_certificate: Some(self.direct_endpoint.server_certificate().to_vec()), + stun_observed_addr: Some(server_observed_addr), + }; + + if let Some(addr) = stun_observed_addr { + let direct_endpoint = self.direct_endpoint.clone(); + tokio::spawn(async move { + if let Err(err) = direct_endpoint.send_probe(addr, false).await { + tracing::warn!( + %addr, + error = %err, + "Failed to send probe packet to requesting sidecar" + ); + } + }); + } + + Ok(Some(config)) + } + + async fn stop_intercept(&self) { + if let Some(task) = self.intercept_task.lock().await.take() { + Self::stop_task(task, "Devbox intercept tunnel").await; 
+ } + } + + async fn shutdown(&self) { + self.stop_intercept().await; + self.stop_client().await; + } + + async fn stop_task(task: TunnelTask, context: &str) { + let TunnelTask { + kind, + intercept_id, + shutdown, + handle, + } = task; + let _ = shutdown.send(()); + if let Err(err) = handle.await { + tracing::warn!( + tunnel_kind = kind.as_str(), + intercept_id = intercept_id.map(|id| id.to_string()), + error = %err, + "{} task exited with error", + context + ); + } + } + + fn spawn_tunnel_task( + &self, + rpc_client: &DevboxSessionRpcClient, + kind: TunnelKind, + token: &str, + user_id: Uuid, + intercept_id: Option, + ) -> TunnelTask { + let manager = self.clone(); + + let (shutdown_tx, shutdown_rx) = oneshot::channel(); + let token = token.to_string(); + let rpc_client = rpc_client.clone(); + let handle = tokio::spawn(async move { + manager + .run_tunnel_loop(&rpc_client, kind, token, user_id, intercept_id, shutdown_rx) + .await; + }); + + TunnelTask { + kind, + intercept_id, + shutdown: shutdown_tx, + handle, + } + } +} + +impl DevboxTunnelManager { + async fn run_tunnel_loop( + &self, + rpc_client: &DevboxSessionRpcClient, + kind: TunnelKind, + token: String, + user_id: Uuid, + intercept_id: Option, + mut shutdown_rx: oneshot::Receiver<()>, + ) { + let ws_url = format!( + "{}/api/v1/kube/devbox/tunnel/{}/{}", + self.ws_base(), + kind.path(), + user_id + ); + let intercept_id_str = intercept_id.map(|id| id.to_string()); + + let mut backoff = Duration::from_secs(1); + + loop { + tokio::select! 
{ + _ = &mut shutdown_rx => { + tracing::info!( + %user_id, + tunnel_kind = kind.as_str(), + intercept_id = intercept_id_str.as_deref(), + "Devbox tunnel shutdown signal received" + ); + break; + } + result = self.connect_and_run_tunnel(rpc_client, kind, &ws_url, &token) => { + match result { + Ok(()) => { + tracing::info!( + %user_id, + intercept_id = intercept_id_str.as_deref(), + "Devbox {} tunnel closed gracefully", kind.as_str() + ); + backoff = Duration::from_secs(1); + } + Err(err) => { + tracing::warn!( + %user_id, + intercept_id = intercept_id_str.as_deref(), + "Devbox {} tunnel disconnected: {}", kind.as_str(), + err + ); + backoff = (backoff.saturating_mul(2)).min(Duration::from_secs(30)); + } + } + + tokio::select! { + _ = &mut shutdown_rx => { + tracing::info!( + %user_id, + intercept_id = intercept_id_str.as_deref(), + "Devbox {} tunnel shutdown signal received", kind.as_str() + ); + break; + } + _ = sleep(backoff) => {} + } + } + } + } + } + + async fn connect_and_run_tunnel( + &self, + rpc_client: &DevboxSessionRpcClient, + kind: TunnelKind, + ws_url: &str, + token: &str, + ) -> Result<(), TunnelError> { + let mut request = ws_url + .into_client_request() + .map_err(tunnel_transport_error)?; + + let header = format!("Bearer {}", token) + .parse() + .map_err(tunnel_transport_error)?; + request + .headers_mut() + .insert(http::header::AUTHORIZATION, header); + + let (stream, _) = tokio_tungstenite::connect_async(request) + .await + .map_err(tunnel_transport_error)?; + + tracing::info!("Devbox {} tunnel connected: {}", kind.as_str(), ws_url); + + if matches!(kind, TunnelKind::Client) { + let stun_observed_addr = self.direct_endpoint.observed_addr(); + let direct_config = { + let active_environment = { *self.active_environment.lock().await }; + if let Some(environment_id) = active_environment { + rpc_client + .request_direct_client_config( + tarpc::context::current(), + environment_id, + stun_observed_addr, + ) + .await + .ok() + .and_then(|r| r.ok()) + 
.flatten() + } else { + None + } + }; + let (client, mode) = TunnelClient::connect_with_direct_or_relay::<_, _, _, _>( + direct_config.as_ref(), + &self.direct_endpoint, + || async { Ok(stream) }, + |err| { + tracing::warn!( + error = %err, + "Direct devbox client tunnel attempt failed; falling back to relay" + ); + }, + ) + .await?; + let client = Arc::new(client); + match mode { + lapdev_tunnel::TunnelMode::Direct => { + tracing::info!("Devbox client tunnel using direct QUIC transport"); + } + lapdev_tunnel::TunnelMode::Relay => { + tracing::info!("Devbox client tunnel using API relay transport"); + } + } + + self.set_tunnel_client(Arc::clone(&client)).await; + + client.closed().await; + tracing::info!("Tunnel client closed for DNS service bridge"); + + self.clear_tunnel_client(&client).await; + Ok(()) + } else { + let transport = RelayEndpoint::client_connection(stream).await?; + run_tunnel_server(transport).await + } + } +} + +fn tunnel_transport_error(err: E) -> TunnelError +where + E: std::fmt::Display, +{ + TunnelError::Transport(std::io::Error::new( + std::io::ErrorKind::Other, + err.to_string(), + )) +} diff --git a/crates/cli/src/devbox/commands/login.rs b/crates/cli/src/devbox/commands/login.rs new file mode 100644 index 0000000..5db2848 --- /dev/null +++ b/crates/cli/src/devbox/commands/login.rs @@ -0,0 +1,122 @@ +use anyhow::{Context, Result}; +use colored::Colorize; +use urlencoding::encode; +use uuid::Uuid; + +use crate::{api::client::LapdevClient, auth}; + +const POLL_INTERVAL_SECONDS: u64 = 2; +const TIMEOUT_SECONDS: u64 = 300; // 5 minutes + +pub async fn execute(api_host: &str, device_name: Option) -> Result<()> { + // Check if already logged in + if auth::has_token(api_host) { + println!( + "{}", + "Already authenticated. 
Use 'lapdev logout' to sign out.".yellow() + ); + return Ok(()); + } + + // Get or generate device name + let device_name = device_name.unwrap_or_else(|| { + hostname::get() + .ok() + .and_then(|h| h.into_string().ok()) + .unwrap_or_else(|| "Unknown Device".to_string()) + }); + + // Generate session ID + let session_id = Uuid::new_v4(); + + // Build auth URL + let base_url = format!("https://{}", api_host); + let auth_url = format!( + "{}/auth/cli?session_id={}&device_name={}", + base_url, + session_id, + encode(&device_name) + ); + + println!("{}", "Opening browser for authentication...".cyan()); + println!(); + println!("If browser doesn't open automatically, visit:"); + println!(" {}", auth_url.bright_blue().underline()); + println!(); + + // Open browser + if let Err(e) = webbrowser::open(&auth_url) { + tracing::warn!("Failed to open browser: {}", e); + println!( + "{}", + "Could not open browser automatically. Please open the URL above manually.".yellow() + ); + } + + println!("{}", "Waiting for authentication...".cyan()); + + // Poll for token + let client = LapdevClient::new(base_url.clone()); + let start_time = std::time::Instant::now(); + let mut poll_count = 0; + + loop { + // Check timeout + if start_time.elapsed().as_secs() > TIMEOUT_SECONDS { + eprintln!("{}", "Authentication timeout. Please try again.".red()); + anyhow::bail!("Authentication timed out after {} seconds", TIMEOUT_SECONDS); + } + + // Poll for token + match client.poll_cli_token(session_id).await { + Ok(Some(response)) => { + // Success! 
Store the token + auth::store_token(api_host, &response.token) + .context("Failed to store authentication token")?; + + // Fetch user info to display + let client_with_token = + LapdevClient::new(base_url).with_token(response.token.clone()); + + match client_with_token.whoami().await { + Ok(whoami) => { + println!(); + println!("{}", "✓ Authentication successful!".green().bold()); + println!(); + println!(" User: {}", whoami.email.bright_white()); + println!(" Organization: {}", whoami.organization_id); + println!(" Device: {}", whoami.device_name.bright_white()); + println!( + " Expires: {} days", + (response.expires_in / 86400).to_string().bright_white() + ); + } + Err(e) => { + // Token stored successfully but couldn't fetch user info + println!(); + println!("{}", "✓ Authentication successful!".green().bold()); + tracing::debug!("Could not fetch user info: {}", e); + } + } + + return Ok(()); + } + Ok(None) => { + // Still pending, continue polling + poll_count += 1; + if poll_count % 5 == 0 { + // Print progress every 10 seconds + println!( + " Still waiting... ({}s)", + poll_count * POLL_INTERVAL_SECONDS + ); + } + tokio::time::sleep(tokio::time::Duration::from_secs(POLL_INTERVAL_SECONDS)).await; + } + Err(e) => { + eprintln!("{}", format!("Error polling for token: {:?}", e).red()); + anyhow::bail!("Failed to authenticate: {}", e); + } + } + } +} diff --git a/crates/cli/src/devbox/commands/logout.rs b/crates/cli/src/devbox/commands/logout.rs new file mode 100644 index 0000000..a06b52f --- /dev/null +++ b/crates/cli/src/devbox/commands/logout.rs @@ -0,0 +1,31 @@ +use anyhow::Result; +use colored::Colorize; + +use crate::auth; + +pub async fn execute(api_host: &str) -> Result<()> { + // Check if token exists + if !auth::has_token(api_host) { + println!( + "{}", + "Not currently authenticated. 
Use 'lapdev devbox login' to sign in.".yellow() + ); + return Ok(()); + } + + // Delete token from keychain + auth::delete_token(api_host)?; + + println!("{}", "✓ Logged out successfully".green()); + println!(); + println!( + "{}", + "Your authentication token has been removed from the keychain.".dimmed() + ); + println!( + "{}", + "Note: This does not invalidate the token on the server.".dimmed() + ); + + Ok(()) +} diff --git a/crates/cli/src/devbox/commands/mod.rs b/crates/cli/src/devbox/commands/mod.rs new file mode 100644 index 0000000..8beb78e --- /dev/null +++ b/crates/cli/src/devbox/commands/mod.rs @@ -0,0 +1,4 @@ +pub mod connect; +pub mod login; +pub mod logout; +pub mod whoami; diff --git a/crates/cli/src/devbox/commands/whoami.rs b/crates/cli/src/devbox/commands/whoami.rs new file mode 100644 index 0000000..829b1c6 --- /dev/null +++ b/crates/cli/src/devbox/commands/whoami.rs @@ -0,0 +1,62 @@ +use anyhow::{Context, Result}; +use chrono::{DateTime, Utc}; +use colored::Colorize; + +use crate::{api::client::LapdevClient, auth}; + +pub async fn execute(api_host: &str, api_url: &str) -> Result<()> { + // Get token from keychain + let token = auth::get_token(api_host)?; + + // Create client and fetch user info + let client = LapdevClient::new(api_url.to_string()).with_token(token); + let whoami = client.whoami().await.context("Failed to fetch user info")?; + + // Display user info + println!(); + println!("{}", "Current Session:".bright_white().bold()); + println!(); + println!(" User: {}", whoami.email.bright_cyan()); + + if let Some(name) = whoami.name { + println!(" Name: {}", name); + } + + println!(" User ID: {}", whoami.user_id.to_string().dimmed()); + println!(" Organization: {}", whoami.organization_id); + println!(" Device: {}", whoami.device_name.bright_white()); + + if let Some(auth_at) = whoami.authenticated_at { + if let Ok(dt) = auth_at.parse::>() { + println!( + " Authenticated: {}", + dt.format("%Y-%m-%d %H:%M:%S UTC").to_string().dimmed() + ); 
+ } + } + + if let Some(exp_at) = whoami.expires_at { + if let Ok(dt) = exp_at.parse::>() { + let now = Utc::now(); + let remaining = dt.signed_duration_since(now); + let days_remaining = remaining.num_days(); + + println!( + " Expires: {} ({} days remaining)", + dt.format("%Y-%m-%d %H:%M:%S UTC").to_string().dimmed(), + if days_remaining > 7 { + days_remaining.to_string().green() + } else if days_remaining > 0 { + days_remaining.to_string().yellow() + } else { + "EXPIRED".to_string().red() + } + ); + } + } + + println!(" API URL: {}", api_url.dimmed()); + println!(); + + Ok(()) +} diff --git a/crates/cli/src/devbox/dns/hosts.rs b/crates/cli/src/devbox/dns/hosts.rs new file mode 100644 index 0000000..5cd8629 --- /dev/null +++ b/crates/cli/src/devbox/dns/hosts.rs @@ -0,0 +1,312 @@ +use anyhow::{anyhow, Context, Result}; +#[cfg(unix)] +const EBUSY_ERRNO: i32 = 16; +use std::{fs::OpenOptions as StdOpenOptions, path::PathBuf}; +use tempfile::NamedTempFile; +use tokio::{fs::OpenOptions as AsyncOpenOptions, io::AsyncWriteExt}; + +#[cfg(windows)] +use std::path::Path; + +use super::ServiceEndpoint; + +const LINE_MARKER: &str = "# lapdev-devbox"; + +/// Manages /etc/hosts file entries for devbox DNS +pub struct HostsManager { + hosts_path: PathBuf, +} + +impl Default for HostsManager { + fn default() -> Self { + Self::new() + } +} + +impl HostsManager { + pub fn new() -> Self { + Self { + hosts_path: Self::get_hosts_path(), + } + } + + /// Get the platform-specific hosts file path + fn get_hosts_path() -> PathBuf { + if cfg!(windows) { + PathBuf::from(r"C:\Windows\System32\drivers\etc\hosts") + } else { + PathBuf::from("/etc/hosts") + } + } + + /// Write service endpoints to hosts file with per-line markers + pub async fn write_entries(&self, endpoints: &[ServiceEndpoint]) -> Result<()> { + // Read existing hosts file + let content = tokio::fs::read_to_string(&self.hosts_path) + .await + .with_context(|| format!("Failed to read hosts file: {:?}", self.hosts_path))?; + + // 
Remove existing lapdev entries + let mut cleaned = self.remove_existing_entries(&content); + + if !cleaned.ends_with('\n') && !cleaned.is_empty() { + cleaned.push('\n'); + } + + for endpoint in endpoints { + let aliases = endpoint.aliases().join(" "); + cleaned.push_str(&format!( + "{} {} {}\n", + endpoint.synthetic_ip, aliases, LINE_MARKER + )); + } + + // Write back (requires elevated permissions) + self.write_hosts_file(&cleaned).await?; + + Ok(()) + } + + /// Remove all lapdev entries from hosts file + pub async fn remove_entries(&self) -> Result<()> { + let content = tokio::fs::read_to_string(&self.hosts_path) + .await + .with_context(|| format!("Failed to read hosts file: {:?}", self.hosts_path))?; + + let cleaned = self.remove_existing_entries(&content); + + self.write_hosts_file(&cleaned).await?; + + Ok(()) + } + + fn remove_existing_entries(&self, content: &str) -> String { + let mut result = String::new(); + + for line in content.lines() { + if line.trim_end().ends_with(LINE_MARKER) { + continue; + } + result.push_str(line); + result.push('\n'); + } + + result + } + + /// Write content to hosts file with platform-specific line endings + async fn write_hosts_file(&self, content: &str) -> Result<()> { + #[cfg(windows)] + let content = { + // Preserve logical lines while enforcing CRLF endings + let mut lines: Vec = content + .lines() + .map(|line| line.trim_end_matches('\r').to_string()) + .collect(); + if let Some(true) = lines.last().map(|line| line.is_empty()) { + lines.pop(); + } + let mut normalized = lines.join("\r\n"); + normalized.push_str("\r\n"); + normalized + }; + + #[cfg(not(windows))] + let content = content.to_string(); + + let parent = self + .hosts_path + .parent() + .context("Hosts path has no parent directory")?; + + let temp_file = NamedTempFile::new_in(parent) + .with_context(|| format!("Failed to create temporary hosts file in {:?}", parent))?; + let temp_path = temp_file.path().to_path_buf(); + + let std_file = 
temp_file.as_file().try_clone().with_context(|| { + format!( + "Failed to clone temporary hosts file handle: {:?}", + temp_path + ) + })?; + let mut async_file = tokio::fs::File::from_std(std_file); + + async_file + .write_all(content.as_bytes()) + .await + .with_context(|| format!("Failed to write temporary hosts file: {:?}", temp_path))?; + async_file + .sync_all() + .await + .with_context(|| format!("Failed to sync temporary hosts file: {:?}", temp_path))?; + + drop(async_file); + + #[cfg(unix)] + if let Ok(metadata) = tokio::fs::metadata(&self.hosts_path).await { + use std::os::unix::fs::PermissionsExt; + let permissions = std::fs::Permissions::from_mode(metadata.permissions().mode()); + let _ = tokio::fs::set_permissions(&temp_path, permissions).await; + } + + #[cfg(windows)] + if let Ok(metadata) = tokio::fs::metadata(&self.hosts_path).await { + let permissions = metadata.permissions(); + let _ = tokio::fs::set_permissions(&temp_path, permissions).await; + } + + #[cfg(windows)] + { + use tempfile::TempPath; + + let temp_path_handle: TempPath = temp_file.into_temp_path(); + replace_file_windows(temp_path.as_path(), &self.hosts_path)?; + let _ = temp_path_handle.close(); + } + + #[cfg(not(windows))] + { + if let Err(err) = temp_file.persist(&self.hosts_path) { + #[cfg(unix)] + let is_busy = err.error.raw_os_error() == Some(EBUSY_ERRNO); + #[cfg(not(unix))] + let is_busy = false; + + if is_busy { + // Fall back to truncating the hosts file in-place when some Linux distros + // reject atomic renames with EBUSY (e.g. immutable bind mounts). 
+ let mut fallback = AsyncOpenOptions::new() + .write(true) + .truncate(true) + .open(&self.hosts_path) + .await + .with_context(|| { + format!( + "Failed to open hosts file for fallback write: {:?}", + self.hosts_path + ) + })?; + + fallback + .write_all(content.as_bytes()) + .await + .with_context(|| { + format!( + "Failed to write hosts file during fallback: {:?}", + self.hosts_path + ) + })?; + fallback.sync_all().await.with_context(|| { + format!( + "Failed to sync hosts file during fallback: {:?}", + self.hosts_path + ) + })?; + + drop(fallback); + let _ = err.file.close(); + } else { + return Err(anyhow!( + "Failed to replace hosts file at {:?}: {}", + self.hosts_path, + err.error + )); + } + } + } + + #[cfg(unix)] + if let Ok(dir) = tokio::fs::File::open(parent).await { + let _ = dir.sync_all().await; + } + + Ok(()) + } + + /// Check if we have permission to write to hosts file + pub fn check_permissions(&self) -> bool { + StdOpenOptions::new() + .write(true) + .append(true) + .open(&self.hosts_path) + .is_ok() + } + + /// Print manual instructions if we can't modify hosts file + pub fn print_manual_instructions(&self, endpoints: &[ServiceEndpoint]) { + eprintln!("\n⚠️ Unable to automatically update hosts file."); + eprintln!( + "Please manually add the following entries to {:?}:", + self.hosts_path + ); + for endpoint in endpoints { + let aliases = endpoint.aliases().join(" "); + eprintln!("{} {} {}", endpoint.synthetic_ip, aliases, LINE_MARKER); + } + eprintln!(); + + if cfg!(unix) { + eprintln!("Run this command with sudo, or edit the file manually:"); + eprintln!(" sudo nano {:?}", self.hosts_path); + } else if cfg!(windows) { + eprintln!("Run this command as Administrator, or edit the file manually:"); + eprintln!(" notepad {:?}", self.hosts_path); + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_remove_existing_entries_strips_tagged_lines() { + let manager = HostsManager::new(); + let content = r#"127.0.0.1 localhost 
+127.77.0.1 svc1.default.svc.cluster.local # lapdev-devbox +127.0.0.2 other +"#; + + let cleaned = manager.remove_existing_entries(content); + assert!(cleaned.contains("127.0.0.1 localhost")); + assert!(cleaned.contains("127.0.0.2 other")); + assert!(!cleaned.contains("lapdev-devbox")); + assert!(cleaned.ends_with('\n')); + } +} + +#[cfg(windows)] +fn replace_file_windows(temp_path: &Path, target_path: &Path) -> Result<()> { + use std::ffi::OsStr; + use std::os::windows::ffi::OsStrExt; + use windows_sys::Win32::Storage::FileSystem::{ReplaceFileW, REPLACEFILE_WRITE_THROUGH}; + + let target_w: Vec = target_path + .as_os_str() + .encode_wide() + .chain(Some(0)) + .collect(); + let temp_w: Vec = temp_path.as_os_str().encode_wide().chain(Some(0)).collect(); + + let success = unsafe { + ReplaceFileW( + target_w.as_ptr(), + temp_w.as_ptr(), + std::ptr::null(), + REPLACEFILE_WRITE_THROUGH, + std::ptr::null_mut(), + std::ptr::null_mut(), + ) + }; + + if success == 0 { + let err = std::io::Error::last_os_error(); + Err(anyhow!( + "Failed to replace hosts file at {:?}: {}", + target_path, + err + )) + } else { + Ok(()) + } +} diff --git a/crates/cli/src/devbox/dns/ip_allocator.rs b/crates/cli/src/devbox/dns/ip_allocator.rs new file mode 100644 index 0000000..a175085 --- /dev/null +++ b/crates/cli/src/devbox/dns/ip_allocator.rs @@ -0,0 +1,172 @@ +use std::{ + collections::{HashMap, HashSet}, + net::{IpAddr, Ipv4Addr}, +}; + +/// Allocates synthetic loopback IPs from a reserved subnet (127.77.0.0/16) +pub struct SyntheticIpAllocator { + /// Base IP: 127.77.0.0 + base: Ipv4Addr, + /// Next available offset (0-65535) tracked across the last two octets + next_offset: u16, + /// Allocated IPs mapped to their service keys (name.namespace) + allocated: HashMap, + /// Reverse mapping from IP to service key + reverse: HashMap, + /// Freed IPs that can be reused + free_pool: HashSet, +} + +impl Default for SyntheticIpAllocator { + fn default() -> Self { + Self::new() + } +} + +impl 
SyntheticIpAllocator { + pub fn new() -> Self { + Self { + base: Ipv4Addr::new(127, 77, 0, 0), + next_offset: 1, // Start from offset 1, skip 0 + allocated: HashMap::new(), + reverse: HashMap::new(), + free_pool: HashSet::new(), + } + } + + /// Allocate or reuse an IP for a service across all ports + pub fn allocate(&mut self, service_name: &str, namespace: &str) -> Option { + let key = format!("{}.{}", service_name, namespace); + + // If already allocated, return existing IP + if let Some(ip) = self.allocated.get(&key) { + return Some(*ip); + } + + // Try to allocate from free pool first + let offset = if let Some(offset) = self.free_pool.iter().next().copied() { + self.free_pool.remove(&offset); + offset + } else { + // Allocate new IP + if self.next_offset == 0 { + // Wrapped around, no more IPs available + return None; + } + let offset = self.next_offset; + self.next_offset = self.next_offset.wrapping_add(1); + offset + }; + + let base_octets = self.base.octets(); + let high = (offset >> 8) as u8; + let low = (offset & 0xFF) as u8; + let ip = IpAddr::V4(Ipv4Addr::new( + base_octets[0], + base_octets[1], + base_octets[2].wrapping_add(high), + low, + )); + + self.allocated.insert(key.clone(), ip); + self.reverse.insert(ip, key); + Some(ip) + } + + /// Deallocate an IP and return it to the free pool + pub fn deallocate(&mut self, service_name: &str, namespace: &str) { + let key = format!("{}.{}", service_name, namespace); + if let Some(ip) = self.allocated.remove(&key) { + self.reverse.remove(&ip); + if let IpAddr::V4(ipv4) = ip { + let ip_octets = ipv4.octets(); + let base_octets = self.base.octets(); + let high = (ip_octets[2] as u16).saturating_sub(base_octets[2] as u16); + let low = ip_octets[3] as u16; + let offset = (high << 8) | low; + if offset != 0 { + self.free_pool.insert(offset); + } + } + } + } + + /// Deallocate all IPs and reset + pub fn clear(&mut self) { + self.allocated.clear(); + self.reverse.clear(); + self.free_pool.clear(); + self.next_offset 
= 1; + } + + /// Get the service key for an IP address + pub fn lookup_ip(&self, ip: &IpAddr) -> Option<&str> { + self.reverse.get(ip).map(|s| s.as_str()) + } + + /// Get all allocated IPs + pub fn allocated_ips(&self) -> Vec { + self.allocated.values().copied().collect() + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_allocate_and_reuse() { + let mut allocator = SyntheticIpAllocator::new(); + + let ip1 = allocator.allocate("service1", "default").unwrap(); + let ip2 = allocator.allocate("service1", "default").unwrap(); + + assert_eq!(ip1, ip2); // Should reuse same IP + + let ip3 = allocator.allocate("service2", "default").unwrap(); + assert_ne!(ip1, ip3); // Different service should get different IP + } + + #[test] + fn test_deallocate_and_reuse() { + let mut allocator = SyntheticIpAllocator::new(); + + let ip1 = allocator.allocate("service1", "default").unwrap(); + allocator.deallocate("service1", "default"); + + let ip2 = allocator.allocate("service2", "default").unwrap(); + assert_eq!(ip1, ip2); // Should reuse deallocated IP + } + + #[test] + fn test_lookup() { + let mut allocator = SyntheticIpAllocator::new(); + + let ip = allocator.allocate("service1", "default").unwrap(); + let key = allocator.lookup_ip(&ip).unwrap(); + assert_eq!(key, "service1.default"); + } + + #[test] + fn test_multi_port_same_ip() { + let mut allocator = SyntheticIpAllocator::new(); + + let ip_http = allocator.allocate("service1", "default").unwrap(); + let ip_https = allocator.allocate("service1", "default").unwrap(); + + assert_eq!(ip_http, ip_https); + } + + #[test] + fn test_allocate_beyond_24_bit_range() { + let mut allocator = SyntheticIpAllocator::new(); + + for i in 0..300 { + let service = format!("service{}", i); + assert!( + allocator.allocate(&service, "default").is_some(), + "allocation failed at index {i}" + ); + } + } +} diff --git a/crates/cli/src/devbox/dns/mod.rs b/crates/cli/src/devbox/dns/mod.rs new file mode 100644 index 0000000..abbaa27 
--- /dev/null +++ b/crates/cli/src/devbox/dns/mod.rs @@ -0,0 +1,57 @@ +mod hosts; +mod ip_allocator; +mod service_bridge; + +pub use hosts::HostsManager; +pub use ip_allocator::SyntheticIpAllocator; +pub use service_bridge::{ServiceBridge, ServiceBridgeStartReport}; + +use lapdev_devbox_rpc::ServiceInfo; +use std::net::IpAddr; + +/// Returns true if the provided protocol string represents TCP. +pub fn is_tcp_protocol(protocol: &str) -> bool { + let trimmed = protocol.trim(); + trimmed.is_empty() || trimmed.eq_ignore_ascii_case("tcp") +} + +/// A service endpoint with synthetic IP and port +#[derive(Debug, Clone)] +pub struct ServiceEndpoint { + pub service_name: String, + pub namespace: String, + pub port: u16, + pub protocol: String, + pub synthetic_ip: IpAddr, + /// Fully qualified domain name: service.namespace.svc + pub fqdn: String, +} + +impl ServiceEndpoint { + pub fn new(info: &ServiceInfo, port: u16, protocol: String, synthetic_ip: IpAddr) -> Self { + let fqdn = format!("{}.{}.svc", info.name, info.namespace); + let normalized_protocol = if is_tcp_protocol(&protocol) { + "TCP".to_string() + } else { + protocol + }; + Self { + service_name: info.name.clone(), + namespace: info.namespace.clone(), + port, + protocol: normalized_protocol, + synthetic_ip, + fqdn, + } + } + + /// Returns all DNS aliases for this service + pub fn aliases(&self) -> Vec { + vec![ + self.service_name.clone(), + format!("{}.{}", self.service_name, self.namespace), + format!("{}.{}.svc", self.service_name, self.namespace), + self.fqdn.clone(), + ] + } +} diff --git a/crates/cli/src/devbox/dns/service_bridge.rs b/crates/cli/src/devbox/dns/service_bridge.rs new file mode 100644 index 0000000..959e2ce --- /dev/null +++ b/crates/cli/src/devbox/dns/service_bridge.rs @@ -0,0 +1,213 @@ +use anyhow::{Context, Result}; +use lapdev_tunnel::TunnelClient; +use std::{collections::HashMap, net::SocketAddr, sync::Arc}; +use tokio::{ + net::{TcpListener, TcpStream}, + sync::RwLock, + 
task::JoinHandle, +}; +use tracing::{debug, error, info, warn}; + +use super::{is_tcp_protocol, ServiceEndpoint}; + +/// Bridges TCP connections from synthetic IPs to services via tunnel +pub struct ServiceBridge { + /// Maps synthetic IP:port to service endpoint info + endpoints: Arc>>, + /// Listener tasks for each synthetic IP + listeners: Arc>>>, + /// Tunnel client for connecting to services + tunnel_client: Arc>>>, +} + +#[derive(Debug, Default)] +pub struct ServiceBridgeStartReport { + pub started: Vec, + pub failed: Vec<(ServiceEndpoint, anyhow::Error)>, +} + +impl ServiceBridge { + pub fn new(tunnel_client: Arc>>>) -> Self { + Self { + endpoints: Arc::new(RwLock::new(HashMap::new())), + listeners: Arc::new(RwLock::new(Vec::new())), + tunnel_client, + } + } + + /// Start listening on synthetic IPs for the given service endpoints + pub async fn start(&self, endpoints: Vec) -> ServiceBridgeStartReport { + // Stop existing listeners + self.stop().await; + + let mut tcp_endpoints = Vec::new(); + let mut skipped = Vec::new(); + + for endpoint in endpoints { + if is_tcp_protocol(&endpoint.protocol) { + tcp_endpoints.push(endpoint); + } else { + skipped.push(endpoint); + } + } + + if !skipped.is_empty() { + for endpoint in &skipped { + warn!( + service = %endpoint.service_name, + namespace = %endpoint.namespace, + port = endpoint.port, + protocol = %endpoint.protocol, + "Skipping non-TCP service endpoint; protocol not supported" + ); + } + } + + { + // Clear any existing endpoint mappings before rebuilding + let mut endpoint_map = self.endpoints.write().await; + endpoint_map.clear(); + } + + // Start listener for each unique IP:port combination + let tunnel_client = Arc::clone(&self.tunnel_client); + let mut tasks = Vec::new(); + let mut started = Vec::new(); + let mut failed = Vec::new(); + + for endpoint in tcp_endpoints { + let addr = SocketAddr::new(endpoint.synthetic_ip, endpoint.port); + match self + .spawn_listener(addr, endpoint.clone(), 
Arc::clone(&tunnel_client)) + .await + { + Ok(task) => { + { + let mut endpoint_map = self.endpoints.write().await; + endpoint_map.insert(addr, endpoint.clone()); + } + tasks.push(task); + started.push(endpoint); + } + Err(e) => { + error!("Failed to start listener on {}: {}", addr, e); + failed.push((endpoint, e)); + } + } + } + + *self.listeners.write().await = tasks; + + let num_started = started.len(); + info!("Service bridge started with {} endpoints", num_started); + + ServiceBridgeStartReport { started, failed } + } + + /// Stop all listeners + pub async fn stop(&self) { + let mut listeners = self.listeners.write().await; + for task in listeners.drain(..) { + task.abort(); + } + info!("Service bridge stopped"); + } + + /// Spawn a TCP listener for a specific address + async fn spawn_listener( + &self, + addr: SocketAddr, + endpoint: ServiceEndpoint, + tunnel_client: Arc>>>, + ) -> Result> { + let listener = TcpListener::bind(addr) + .await + .with_context(|| format!("Failed to bind to {}", addr))?; + + let endpoints = Arc::clone(&self.endpoints); + + info!( + "Started listener on {} for {}/{} (port {})", + addr, endpoint.namespace, endpoint.service_name, endpoint.port + ); + + let task = tokio::spawn(async move { + loop { + match listener.accept().await { + Ok((stream, peer_addr)) => { + debug!("Accepted connection from {} to {}", peer_addr, addr); + + let endpoints = Arc::clone(&endpoints); + let tunnel_client = Arc::clone(&tunnel_client); + + tokio::spawn(async move { + if let Err(e) = + handle_connection(stream, addr, endpoints, tunnel_client).await + { + error!("Connection handler error: {:?}", e); + } + }); + } + Err(e) => { + error!("Failed to accept connection on {}: {}", addr, e); + } + } + } + }); + + Ok(task) + } +} + +/// Handle a single TCP connection by proxying it through the tunnel +async fn handle_connection( + mut local_stream: TcpStream, + local_addr: SocketAddr, + endpoints: Arc>>, + tunnel_client: Arc>>>, +) -> Result<()> { + // Look up 
the service endpoint + let endpoint = { + let endpoints = endpoints.read().await; + endpoints + .get(&local_addr) + .cloned() + .context(format!("No endpoint found for {}", local_addr))? + }; + + let tunnel_client = { + let guard = tunnel_client.read().await; + guard + .as_ref() + .cloned() + .context("Tunnel client not available")? + }; + + debug!( + "Connecting to service: {} ({}:{})", + endpoint.fqdn, endpoint.fqdn, endpoint.port + ); + + // Connect through tunnel + let tunnel_stream = tunnel_client + .connect_tcp(&endpoint.fqdn, endpoint.port) + .await + .context("Failed to establish tunnel connection")?; + + debug!("Tunnel established to {}", endpoint.fqdn); + + // Proxy bytes bidirectionally + match tokio::io::copy_bidirectional(&mut local_stream, &mut Box::pin(tunnel_stream)).await { + Ok((to_remote, to_local)) => { + debug!( + "Connection closed: {} bytes to remote, {} bytes to local", + to_remote, to_local + ); + } + Err(e) => { + warn!("Proxy error: {}", e); + } + } + + Ok(()) +} diff --git a/crates/cli/src/devbox/mod.rs b/crates/cli/src/devbox/mod.rs new file mode 100644 index 0000000..95a51e0 --- /dev/null +++ b/crates/cli/src/devbox/mod.rs @@ -0,0 +1,27 @@ +use clap::Subcommand; + +use crate::config; + +pub mod commands; +pub mod dns; + +#[derive(Subcommand)] +pub enum DevboxCommand { + /// Connect to Lapdev devbox and establish port forwarding tunnels + Connect { + /// API host (e.g. 
app.lap.dev) + #[arg(long = "api-host", alias = "api-url", env = "LAPDEV_API_HOST")] + api_host: Option, + }, +} + +pub async fn handle_command(command: DevboxCommand) -> anyhow::Result<()> { + match command { + DevboxCommand::Connect { api_host } => { + let (api_host, _) = config::resolve_api_base_url(api_host); + commands::connect::execute(&api_host).await?; + } + } + + Ok(()) +} diff --git a/crates/cli/src/main.rs b/crates/cli/src/main.rs new file mode 100644 index 0000000..57a0335 --- /dev/null +++ b/crates/cli/src/main.rs @@ -0,0 +1,148 @@ +use std::{fs, path::PathBuf}; + +use anyhow::Context; +use clap::{Parser, Subcommand}; + +mod api; +mod auth; +mod config; +mod devbox; + +#[derive(Parser)] +#[command(name = "lapdev")] +#[command(about = "Lapdev CLI - Self-Hosted Remote Dev Environment", long_about = None)] +#[command(version)] +struct Cli { + #[command(subcommand)] + command: Commands, + + /// Enable verbose logging + #[arg(short, long, global = true)] + verbose: bool, + + /// Directory for lapdev-cli log files + #[arg(long = "log-dir", env = "LAPDEV_LOG_DIR", value_name = "PATH")] + log_dir: Option, +} + +#[derive(Subcommand)] +enum Commands { + /// Authenticate with Lapdev + Login { + /// Device name (defaults to hostname) + #[arg(long)] + device_name: Option, + + /// API host (e.g. app.lap.dev) + #[arg(long = "api-host", alias = "api-url", env = "LAPDEV_API_HOST")] + api_host: Option, + }, + + /// Sign out (deletes token from keychain) + Logout { + /// API host (e.g. app.lap.dev) + #[arg(long = "api-host", alias = "api-url", env = "LAPDEV_API_HOST")] + api_host: Option, + }, + + /// Show current session info + Whoami { + /// API host (e.g. 
app.lap.dev) + #[arg(long = "api-host", alias = "api-url", env = "LAPDEV_API_HOST")] + api_host: Option, + }, + + /// Devbox commands + Devbox { + #[command(subcommand)] + command: devbox::DevboxCommand, + }, +} + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + let cli = Cli::parse(); + + let log_dir = resolve_log_dir(cli.log_dir.clone())?; + fs::create_dir_all(&log_dir) + .with_context(|| format!("failed to create log directory at {}", log_dir.display()))?; + let file_appender = tracing_appender::rolling::daily(&log_dir, "lapdev-cli.log"); + let (file_writer, _file_guard) = tracing_appender::non_blocking(file_appender); + + // Setup logging + let log_level = if cli.verbose { + tracing::Level::DEBUG + } else { + tracing::Level::INFO + }; + + use tracing_subscriber::{fmt, prelude::*, EnvFilter}; + + // Filter out tarpc internal tracing + let filter = EnvFilter::from_default_env() + .add_directive(log_level.into()) + .add_directive("tarpc=warn".parse().unwrap()) + .add_directive("tarpc::client=warn".parse().unwrap()); // Only show tarpc warnings/errors + + let file_layer = fmt::layer() + .with_target(true) + .with_ansi(false) + .with_writer(file_writer); + + tracing_subscriber::registry() + .with(filter) + .with(file_layer) + .init(); + + tracing::debug!("writing logs to {}", log_dir.display()); + + // Handle commands + match cli.command { + Commands::Login { + device_name, + api_host, + } => { + let (api_host, _) = config::resolve_api_base_url(api_host); + devbox::commands::login::execute(&api_host, device_name).await?; + } + Commands::Logout { api_host } => { + let (api_host, _) = config::resolve_api_base_url(api_host); + devbox::commands::logout::execute(&api_host).await?; + } + Commands::Whoami { api_host } => { + let (api_host, api_url) = config::resolve_api_base_url(api_host); + devbox::commands::whoami::execute(&api_host, &api_url).await?; + } + Commands::Devbox { command } => { + devbox::handle_command(command).await?; + } + } + + Ok(()) +} + +fn 
resolve_log_dir(override_dir: Option) -> anyhow::Result { + override_dir + .or_else(default_log_dir) + .ok_or_else(|| anyhow::anyhow!("unable to determine log directory")) +} + +#[cfg(target_os = "macos")] +fn default_log_dir() -> Option { + dirs::home_dir().map(|home| home.join("Library").join("Logs").join("Lapdev")) +} + +#[cfg(target_os = "windows")] +fn default_log_dir() -> Option { + dirs::data_local_dir() + .or_else(|| dirs::home_dir().map(|home| home.join("AppData").join("Local"))) + .map(|base| base.join("Lapdev").join("logs")) +} + +#[cfg(not(any(target_os = "macos", target_os = "windows")))] +fn default_log_dir() -> Option { + std::env::var_os("XDG_STATE_HOME") + .map(PathBuf::from) + .or_else(|| dirs::home_dir().map(|home| home.join(".local").join("state"))) + .map(|base| base.join("lapdev").join("logs")) +} diff --git a/lapdev-common/Cargo.toml b/crates/common/Cargo.toml similarity index 93% rename from lapdev-common/Cargo.toml rename to crates/common/Cargo.toml index 696b864..b895f2d 100644 --- a/lapdev-common/Cargo.toml +++ b/crates/common/Cargo.toml @@ -12,6 +12,7 @@ serde_json.workspace = true anyhow.workspace = true base16ct.workspace = true sha2.workspace = true +secrecy.workspace = true rand.workspace = true strum.workspace = true strum_macros.workspace = true diff --git a/crates/common/pages/error_page.html b/crates/common/pages/error_page.html new file mode 100644 index 0000000..1cd9326 --- /dev/null +++ b/crates/common/pages/error_page.html @@ -0,0 +1,160 @@ + + + + Lapdev Dashboard + + + +
+
+ + + + + + + Lapdev + +

Preview URL issue

+

We couldn’t reach this preview

+

+ Lapdev tried to route your Preview URL but the backing environment didn’t respond as expected. Review the reported reason below and follow the steps to get things moving again. +

+
+ What went wrong +

{{MESSAGE}}

+
+
    +
  • Confirm the environment is running and the correct revision is deployed.
  • +
  • Verify this Preview URL still publishes the service/port you expect.
  • +
  • Ensure you’re signed into the Lapdev organization that owns this link.
  • +
+

+ If you manage this environment: restart or redeploy it, then recreate the Preview URL if the issue persists. +

+

+ If you’re viewing someone else’s link: share this page with the owner so they can republish the service or grant access. +

+

Need a hand? Ask a Lapdev admin or teammate to review the environment status from the dashboard.

+
+
+ + diff --git a/crates/common/src/config.rs b/crates/common/src/config.rs new file mode 100644 index 0000000..d1e9349 --- /dev/null +++ b/crates/common/src/config.rs @@ -0,0 +1 @@ +pub const LAPDEV_CLUSTER_NOT_INITIATED: &str = "lapdev-cluster-not-initiated"; diff --git a/lapdev-common/src/console.rs b/crates/common/src/console.rs similarity index 100% rename from lapdev-common/src/console.rs rename to crates/common/src/console.rs diff --git a/crates/common/src/devbox.rs b/crates/common/src/devbox.rs new file mode 100644 index 0000000..1cf8f2e --- /dev/null +++ b/crates/common/src/devbox.rs @@ -0,0 +1,80 @@ +use chrono::{DateTime, Utc}; +use serde::{Deserialize, Serialize}; +use std::net::SocketAddr; +use uuid::Uuid; + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct DevboxSessionSummary { + pub id: Uuid, + pub device_name: String, + pub token_prefix: String, + pub active_environment_id: Option, + pub created_at: DateTime, + pub expires_at: DateTime, + pub last_used_at: DateTime, + pub revoked_at: Option>, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct DevboxSessionWhoAmI { + pub user_id: Uuid, + pub email: Option, + pub device_name: String, + pub authenticated_at: Option>, + pub expires_at: Option>, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct DevboxEnvironmentSelection { + pub environment_id: Uuid, + pub cluster_name: String, + pub namespace: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct DevboxPortMapping { + pub workload_port: u16, + pub local_port: u16, + pub protocol: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct DevboxPortMappingOverride { + pub workload_port: u16, + pub local_port: Option, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct DevboxWorkloadInterceptSummary { + pub intercept_id: Uuid, + pub session_id: Uuid, + pub workload_id: Uuid, + pub workload_name: String, + pub namespace: String, + pub port_mappings: Vec, + pub 
created_at: DateTime, + pub restored_at: Option>, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct DevboxWorkloadInterceptListResponse { + pub intercepts: Vec, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct DevboxStartWorkloadInterceptResponse { + pub intercept_id: Uuid, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct DirectTunnelCredential { + pub token: String, + pub expires_at: DateTime, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct DirectTunnelConfig { + pub credential: DirectTunnelCredential, + pub server_certificate: Option>, + pub stun_observed_addr: Option, +} diff --git a/lapdev-common/src/devcontainer.rs b/crates/common/src/devcontainer.rs similarity index 100% rename from lapdev-common/src/devcontainer.rs rename to crates/common/src/devcontainer.rs diff --git a/crates/common/src/error_page.rs b/crates/common/src/error_page.rs new file mode 100644 index 0000000..03030fc --- /dev/null +++ b/crates/common/src/error_page.rs @@ -0,0 +1,43 @@ +const TEMPLATE: &str = include_str!(concat!( + env!("CARGO_MANIFEST_DIR"), + "/pages/error_page.html" +)); +const MESSAGE_PLACEHOLDER: &str = "{{MESSAGE}}"; + +/// Render the shared error page template with the provided message. 
+pub fn render_error_page(message: &str) -> String { + TEMPLATE.replace(MESSAGE_PLACEHOLDER, &escape_html(message)) +} + +fn escape_html(input: &str) -> String { + let mut escaped = String::with_capacity(input.len()); + for ch in input.chars() { + match ch { + '&' => escaped.push_str("&"), + '<' => escaped.push_str("<"), + '>' => escaped.push_str(">"), + '"' => escaped.push_str("""), + '\'' => escaped.push_str("'"), + _ => escaped.push(ch), + } + } + escaped +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn replaces_placeholder() { + let page = render_error_page("Example"); + assert!(page.contains("Example")); + assert!(!page.contains(MESSAGE_PLACEHOLDER)); + } + + #[test] + fn escapes_html() { + let page = render_error_page(""); + assert!(page.contains("<script>alert('xss')</script>")); + } +} diff --git a/crates/common/src/hrpc.rs b/crates/common/src/hrpc.rs new file mode 100644 index 0000000..2ab9708 --- /dev/null +++ b/crates/common/src/hrpc.rs @@ -0,0 +1,10 @@ +#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] +pub struct HrpcError { + pub error: String, +} + +impl From for anyhow::Error { + fn from(err: HrpcError) -> Self { + anyhow::anyhow!(err.error) + } +} diff --git a/crates/common/src/kube.rs b/crates/common/src/kube.rs new file mode 100644 index 0000000..147447b --- /dev/null +++ b/crates/common/src/kube.rs @@ -0,0 +1,477 @@ +use std::collections::BTreeMap; + +use chrono::{DateTime, FixedOffset, Utc}; +use serde::{Deserialize, Serialize}; +use uuid::Uuid; + +pub const KUBE_CLUSTER_TOKEN_HEADER: &str = "X-Cluster-Token"; +pub const KUBE_CLUSTER_TOKEN_ENV_VAR: &str = "LAPDEV_KUBE_CLUSTER_TOKEN"; +pub const KUBE_CLUSTER_URL_ENV_VAR: &str = "LAPDEV_KUBE_CLUSTER_URL"; +pub const KUBE_CLUSTER_TUNNEL_URL_ENV_VAR: &str = "LAPDEV_KUBE_CLUSTER_TUNNEL_URL"; +pub const DEFAULT_KUBE_CLUSTER_URL: &str = "wss://ws.lap.dev/api/v1/kube/cluster/rpc"; +pub const DEFAULT_KUBE_CLUSTER_TUNNEL_URL: &str = 
"wss://ws.lap.dev/api/v1/kube/cluster/tunnel"; + +pub const KUBE_ENVIRONMENT_TOKEN_HEADER: &str = "X-Lapdev-Environment-Token"; +pub const KUBE_ENVIRONMENT_TOKEN_HEADER_LOWER: &str = "x-lapdev-environment-token"; + +pub const SIDECAR_PROXY_MANAGER_ADDR_ENV_VAR: &str = "LAPDEV_SIDECAR_PROXY_MANAGER_ADDR"; +pub const SIDECAR_PROXY_MANAGER_PORT_ENV_VAR: &str = "LAPDEV_SIDECAR_PROXY_MANAGER_PORT"; +pub const DEFAULT_SIDECAR_PROXY_MANAGER_PORT: u16 = 5001; +pub const SIDECAR_PROXY_PORT_ENV_VAR: &str = "LAPDEV_SIDECAR_PROXY_PORT"; +pub const DEFAULT_SIDECAR_PROXY_PORT: u16 = 25001; +pub const DEFAULT_SIDECAR_PROXY_METRICS_PORT: u16 = 25090; +pub const SIDECAR_PROXY_DYNAMIC_PORT_START: u16 = DEFAULT_SIDECAR_PROXY_PORT + 1000; +pub const SIDECAR_PROXY_WORKLOAD_ENV_VAR: &str = "LAPDEV_SIDECAR_PROXY_WORKLOAD"; +pub const SIDECAR_PROXY_BIND_ADDR_ENV_VAR: &str = "LAPDEV_SIDECAR_PROXY_BIND_ADDR"; +pub const DEFAULT_SIDECAR_PROXY_BIND_ADDR: &str = "0.0.0.0"; +pub const SIDECAR_PROXY_PORT_ROUTES_ENV_VAR: &str = "LAPDEV_SIDECAR_PROXY_PORT_ROUTES"; + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct KubeCluster { + pub id: Uuid, + pub name: String, + pub can_deploy_personal: bool, + pub can_deploy_shared: bool, + pub info: KubeClusterInfo, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct KubeClusterInfo { + pub cluster_version: String, + pub node_count: u32, + pub available_cpu: String, + pub available_memory: String, + pub provider: Option, + pub region: Option, + pub status: KubeClusterStatus, + pub manager_namespace: Option, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ClusterStatusEvent { + pub organization_id: Uuid, + pub cluster_id: Uuid, + pub status: KubeClusterStatus, + pub cluster_version: Option, + pub region: Option, + pub updated_at: DateTime, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct AppCatalogStatusEvent { + pub organization_id: Uuid, + pub catalog_id: Uuid, + pub cluster_id: Uuid, + pub 
sync_version: i64, + pub last_synced_at: Option, + pub last_sync_actor_id: Option, + pub updated_at: DateTime, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct EnvironmentWorkloadStatusEvent { + pub organization_id: Uuid, + pub environment_id: Uuid, + pub workload_id: Uuid, + pub ready_replicas: Option, + pub updated_at: DateTime, +} + +#[derive(Debug, Clone, Serialize, Deserialize, strum_macros::EnumString, strum_macros::Display)] +pub enum KubeClusterStatus { + Ready, + #[strum(to_string = "Not Ready")] + NotReady, + Provisioning, + Error, +} + +#[derive( + Debug, + Clone, + Serialize, + Deserialize, + strum_macros::EnumString, + strum_macros::Display, + PartialEq, + Eq, +)] +pub enum KubeEnvironmentSyncStatus { + Idle, + Syncing, +} + +#[derive( + Debug, + Clone, + Copy, + Serialize, + Deserialize, + strum_macros::EnumString, + strum_macros::Display, + PartialEq, + Eq, +)] +pub enum KubeEnvironmentStatus { + Creating, + Running, + Failed, + Error, + Pausing, + Paused, + PauseFailed, + Resuming, + ResumeFailed, + Deleting, + Deleted, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct KubeWorkload { + pub name: String, + pub namespace: String, + pub kind: KubeWorkloadKind, + pub replicas: Option, + pub ready_replicas: Option, + pub created_at: Option, + pub labels: std::collections::HashMap, +} + +#[derive( + PartialEq, Debug, Clone, Serialize, Deserialize, strum_macros::Display, strum_macros::EnumString, +)] +pub enum KubeWorkloadKind { + Deployment, + StatefulSet, + DaemonSet, + ReplicaSet, + Pod, + Job, + CronJob, +} + +#[derive( + Debug, Clone, Serialize, Deserialize, PartialEq, strum_macros::EnumString, strum_macros::Display, +)] +pub enum PreviewUrlAccessLevel { + Organization, // Accessible by authenticated organization members + Public, // Accessible by anyone without authentication +} + +impl PreviewUrlAccessLevel { + pub fn get_detailed_message(&self) -> &'static str { + match self { + PreviewUrlAccessLevel::Organization 
=> { + "Organization - Members of your Lapdev org after login" + } + PreviewUrlAccessLevel::Public => "Public - Anyone can access without authentication", + } + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct KubeWorkloadList { + pub workloads: Vec, + pub next_cursor: Option, +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct PaginationCursor { + pub workload_kind: KubeWorkloadKind, + pub continue_token: Option, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct PaginationParams { + pub cursor: Option, + pub limit: usize, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct PagePaginationParams { + pub page: usize, + pub page_size: usize, +} + +impl Default for PagePaginationParams { + fn default() -> Self { + Self { + page: 1, + page_size: 20, + } + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct PaginatedInfo { + pub total_count: usize, + pub page: usize, + pub page_size: usize, + pub total_pages: usize, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct PaginatedResult { + pub data: Vec, + pub pagination_info: PaginatedInfo, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct KubeEnvironmentStatusCount { + pub status: KubeEnvironmentStatus, + pub count: usize, +} + +#[derive(Debug, Clone, Serialize, Deserialize, Default)] +pub struct KubeEnvironmentDashboardSummary { + pub personal_count: usize, + pub shared_count: usize, + pub branch_count: usize, + pub total_count: usize, + pub status_breakdown: Vec, + pub recent_environments: Vec, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct CreateKubeClusterResponse { + pub cluster_id: Uuid, + pub token: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct KubeNamespace { + pub id: Uuid, + pub name: String, + pub description: Option, + pub is_shared: bool, + pub created_at: DateTime, + pub created_by: Uuid, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] 
+pub struct KubeNamespaceInfo { + pub name: String, + pub status: String, + pub created_at: Option, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct KubeAppCatalog { + pub id: Uuid, + pub name: String, + pub description: Option, + pub created_at: DateTime, + pub created_by: Uuid, + pub cluster_id: Uuid, + pub cluster_name: String, + pub last_sync_actor_id: Option, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct KubeAppCatalogWorkload { + pub id: Uuid, + pub name: String, + pub namespace: String, + pub kind: KubeWorkloadKind, + pub containers: Vec, + pub ports: Vec, + pub workload_yaml: String, + pub catalog_sync_version: i64, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct KubeAppCatalogWorkloadCreate { + pub name: String, + pub namespace: String, + pub kind: KubeWorkloadKind, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct KubeEnvironmentWorkload { + pub id: Uuid, + pub created_at: DateTime, + pub environment_id: Uuid, + pub base_workload_id: Option, + pub name: String, + pub namespace: String, + pub kind: String, + pub containers: Vec, + pub ports: Vec, + pub workload_yaml: String, + pub catalog_sync_version: i64, + pub ready_replicas: Option, +} + +impl KubeEnvironmentWorkload { + /// Returns true if any containers in this workload have been customized + pub fn has_customized_containers(&self) -> bool { + self.containers.iter().any(|c| c.is_customized()) + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct KubeEnvVar { + pub name: String, + pub value: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub enum KubeContainerImage { + /// Follow the image from the original workload (tracks updates) + FollowOriginal, + /// Use a custom specified image + Custom(String), +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct KubeContainerPort { + pub name: Option, + pub container_port: i32, + pub protocol: Option, // TCP, UDP, SCTP +} + +#[derive(Debug, 
Clone, Serialize, Deserialize)] +pub struct KubeContainerInfo { + pub name: String, + pub original_image: String, + pub image: KubeContainerImage, + pub cpu_request: Option, + pub cpu_limit: Option, + pub memory_request: Option, + pub memory_limit: Option, + pub env_vars: Vec, + pub original_env_vars: Vec, + pub ports: Vec, +} + +impl KubeContainerInfo { + /// Returns true if this container has been customized from its original configuration + pub fn is_customized(&self) -> bool { + // Check if image is customized + matches!(self.image, KubeContainerImage::Custom(_)) + // Check if environment variables are customized (non-empty indicates customization) + || !self.env_vars.is_empty() + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct KubeWorkloadDetails { + pub name: String, + pub namespace: String, + pub kind: KubeWorkloadKind, + pub containers: Vec, + pub ports: Vec, + pub workload_yaml: String, + pub base_workload_id: Option, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct KubeEnvironment { + pub id: Uuid, + pub user_id: Uuid, + pub name: String, + pub namespace: String, + pub app_catalog_id: Uuid, + pub app_catalog_name: String, + pub cluster_id: Uuid, + pub cluster_name: String, + pub status: KubeEnvironmentStatus, + pub created_at: String, + pub is_shared: bool, + pub base_environment_id: Option, + pub base_environment_name: Option, + pub base_environment_catalog_sync_version: Option, + pub base_environment_last_catalog_synced_at: Option, + pub catalog_sync_version: i64, + pub last_catalog_synced_at: Option, + pub paused_at: Option, + pub resumed_at: Option, + pub catalog_update_available: bool, + pub catalog_last_sync_actor_id: Option, + pub sync_status: KubeEnvironmentSyncStatus, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct KubeEnvironmentWorkloadDetail { + pub environment: KubeEnvironment, + pub workload: KubeEnvironmentWorkload, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct 
KubeServicePort { + pub name: Option, + pub port: i32, + pub target_port: Option, + pub protocol: Option, + pub node_port: Option, + #[serde(default)] + pub original_target_port: Option, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct KubeServiceDetails { + pub name: String, + pub ports: Vec, + pub selector: BTreeMap, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct KubeServiceWithYaml { + pub yaml: String, + pub details: KubeServiceDetails, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct ProxyPortRoute { + /// Port exposed by the sidecar proxy inside the pod. + pub proxy_port: u16, + /// Service port advertised to the cluster. + pub service_port: u16, + /// Container port on the workload that should receive the traffic. + pub target_port: u16, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct KubeEnvironmentService { + pub id: Uuid, + pub created_at: DateTime, + pub environment_id: Uuid, + pub name: String, + pub namespace: String, + pub yaml: String, + pub ports: Vec, + pub selector: BTreeMap, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct KubeEnvironmentPreviewUrl { + pub id: Uuid, + pub created_at: DateTime, + pub environment_id: Uuid, + pub service_id: Uuid, + pub name: String, + pub description: Option, + pub port: i32, + pub port_name: Option, + pub protocol: String, + pub access_level: PreviewUrlAccessLevel, + pub created_by: Uuid, + pub last_accessed_at: Option>, + pub url: String, // Full preview URL +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct CreateKubeEnvironmentPreviewUrlRequest { + pub description: Option, + pub service_id: Uuid, + pub port: i32, + pub port_name: Option, + pub protocol: Option, // Default to "HTTP" + pub access_level: Option, // Default to Organization +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct UpdateKubeEnvironmentPreviewUrlRequest { + pub description: Option, + pub access_level: Option, 
+} diff --git a/lapdev-common/src/lib.rs b/crates/common/src/lib.rs similarity index 98% rename from lapdev-common/src/lib.rs rename to crates/common/src/lib.rs index 318d55f..0cf51b8 100644 --- a/lapdev-common/src/lib.rs +++ b/crates/common/src/lib.rs @@ -1,3 +1,13 @@ +pub mod config; +pub mod console; +pub mod devbox; +pub mod devcontainer; +pub mod error_page; +pub mod hrpc; +pub mod kube; +pub mod token; +pub mod utils; + use std::collections::HashMap; use chrono::{DateTime, FixedOffset}; @@ -6,13 +16,12 @@ use serde::{Deserialize, Serialize}; use strum_macros::EnumString; use uuid::Uuid; -pub mod console; -pub mod devcontainer; -pub mod utils; - pub const LAPDEV_DEFAULT_OSUSER: &str = "lapdev"; pub const LAPDEV_BASE_HOSTNAME: &str = "lapdev-base-hostname"; +pub const LAPDEV_API_HOST: &str = "app.lap.dev"; pub const LAPDEV_ISOLATE_CONTAINER: &str = "lapdev-isolate-container"; +pub const LAPDEV_AUTH_STATE_COOKIE: &str = "lapdev_auth_state"; +pub const LAPDEV_AUTH_TOKEN_COOKIE: &str = "lapdev_auth_token"; #[derive(Serialize, Deserialize, Debug)] pub struct NewProject { diff --git a/crates/common/src/token.rs b/crates/common/src/token.rs new file mode 100644 index 0000000..5768bf3 --- /dev/null +++ b/crates/common/src/token.rs @@ -0,0 +1,49 @@ +use rand::distr::{Alphanumeric, SampleString}; +use secrecy::{ExposeSecret, SecretSlice, SecretString}; +use sha2::{Digest, Sha256}; + +const TOKEN_LENGTH: usize = 64; + +pub struct HashedToken(SecretSlice); + +impl HashedToken { + pub fn parse(plaintext: &str) -> Self { + let sha256 = Self::hash(plaintext).into(); + Self(sha256) + } + + pub fn hash(plaintext: &str) -> Vec { + Sha256::digest(plaintext.as_bytes()).as_slice().to_vec() + } +} + +impl ExposeSecret<[u8]> for HashedToken { + fn expose_secret(&self) -> &[u8] { + self.0.expose_secret() + } +} + +pub struct PlainToken(SecretString); + +impl PlainToken { + pub fn generate() -> Self { + let plaintext = generate_secure_alphanumeric_string(TOKEN_LENGTH).into(); + + 
Self(plaintext) + } + + pub fn hashed(&self) -> HashedToken { + let sha256 = HashedToken::hash(self.expose_secret()).into(); + HashedToken(sha256) + } +} + +impl ExposeSecret for PlainToken { + fn expose_secret(&self) -> &str { + self.0.expose_secret() + } +} + +fn generate_secure_alphanumeric_string(len: usize) -> String { + Alphanumeric.sample_string(&mut rand::rng(), len) +} diff --git a/crates/common/src/utils.rs b/crates/common/src/utils.rs new file mode 100644 index 0000000..07a3f94 --- /dev/null +++ b/crates/common/src/utils.rs @@ -0,0 +1,82 @@ +use std::time::{SystemTime, UNIX_EPOCH}; + +use crate::LAPDEV_API_HOST; + +use rand::{distr::Alphanumeric, Rng}; +use sha2::{Digest, Sha256}; + +pub fn rand_string(n: usize) -> String { + rand::rng() + .sample_iter(&Alphanumeric) + .take(n) + .map(|i| i as char) + .collect::() + .to_lowercase() +} + +pub fn sha256(input: &str) -> String { + let hash = Sha256::digest(input.as_bytes()); + base16ct::lower::encode_string(&hash) +} + +pub fn format_repo_url(repo: &str) -> String { + let repo = repo.trim().to_lowercase(); + let repo = if !repo.starts_with("http://") + && !repo.starts_with("https://") + && !repo.starts_with("ssh://") + { + format!("https://{repo}") + } else { + repo.to_string() + }; + repo.strip_suffix('/') + .map(|r| r.to_string()) + .unwrap_or(repo) +} + +pub fn unix_timestamp() -> anyhow::Result { + Ok(SystemTime::now().duration_since(UNIX_EPOCH)?.as_secs()) +} + +/// Resolve the Lapdev API host from an optional input string. +/// +/// Accepts raw values that may include schemes (http/https/ws/wss), +/// trailing slashes, or the `/api` & `/api/v1` suffixes. Returns the +/// canonical host form (no scheme, no path). Falls back to the default +/// host when the input is `None` or empty. 
+pub fn resolve_api_host(raw: Option<&str>) -> String { + let candidate = raw + .map(|value| value.trim()) + .filter(|value| !value.is_empty()) + .map(|value| value.to_string()); + + let mut host = match candidate { + Some(value) => value, + None => return LAPDEV_API_HOST.to_string(), + }; + + for prefix in ["https://", "http://", "wss://", "ws://"] { + if let Some(stripped) = host.strip_prefix(prefix) { + host = stripped.to_string(); + break; + } + } + + host = host + .trim_start_matches('/') + .trim_end_matches('/') + .to_string(); + + for suffix in ["/api/v1", "/api"] { + if let Some(stripped) = host.strip_suffix(suffix) { + host = stripped.trim_end_matches('/').to_string(); + break; + } + } + + if host.is_empty() { + LAPDEV_API_HOST.to_string() + } else { + host + } +} diff --git a/lapdev-conductor/Cargo.toml b/crates/conductor/Cargo.toml similarity index 95% rename from lapdev-conductor/Cargo.toml rename to crates/conductor/Cargo.toml index 9a07084..6a877c1 100644 --- a/lapdev-conductor/Cargo.toml +++ b/crates/conductor/Cargo.toml @@ -24,6 +24,7 @@ sea-orm.workspace = true tarpc.workspace = true uuid.workspace = true lapdev-db.workspace = true +lapdev-db-entities.workspace = true lapdev-rpc.workspace = true lapdev-common.workspace = true lapdev-enterprise.workspace = true diff --git a/lapdev-conductor/src/lib.rs b/crates/conductor/src/lib.rs similarity index 100% rename from lapdev-conductor/src/lib.rs rename to crates/conductor/src/lib.rs diff --git a/lapdev-conductor/src/rpc.rs b/crates/conductor/src/rpc.rs similarity index 97% rename from lapdev-conductor/src/rpc.rs rename to crates/conductor/src/rpc.rs index 3746a0e..ff1f95f 100644 --- a/lapdev-conductor/src/rpc.rs +++ b/crates/conductor/src/rpc.rs @@ -1,6 +1,5 @@ use anyhow::Result; use lapdev_common::{BuildTarget, HostWorkspace, PrebuildUpdateEvent, WorkspaceUpdateEvent}; -use lapdev_db::entities; use lapdev_rpc::{error::ApiError, ConductorService}; use sea_orm::{ActiveModelTrait, ActiveValue}; use 
tarpc::context; @@ -41,7 +40,7 @@ impl ConductorService for ConductorRpc { workspace_id: Uuid, last_inactivity: Option>, ) { - let _ = entities::workspace::ActiveModel { + let _ = lapdev_db_entities::workspace::ActiveModel { id: ActiveValue::Set(workspace_id), last_inactivity: ActiveValue::Set(last_inactivity), ..Default::default() diff --git a/lapdev-conductor/src/scheduler.rs b/crates/conductor/src/scheduler.rs similarity index 53% rename from lapdev-conductor/src/scheduler.rs rename to crates/conductor/src/scheduler.rs index b63ada3..ebf76e3 100644 --- a/lapdev-conductor/src/scheduler.rs +++ b/crates/conductor/src/scheduler.rs @@ -3,7 +3,7 @@ use std::collections::{HashMap, HashSet}; use anyhow::Result; use itertools::Itertools; use lapdev_common::{CpuCore, PrebuildStatus, WorkspaceHostStatus}; -use lapdev_db::{entities, links}; +use lapdev_db::links; use sea_orm::{ ActiveModelTrait, ActiveValue, ColumnTrait, DatabaseTransaction, EntityTrait, QueryFilter, QueryOrder, QuerySelect, @@ -56,7 +56,7 @@ pub async fn pick_workspce_host( disk: usize, region: String, cpu_overcommit: usize, -) -> Result<(entities::workspace_host::Model, CpuCore)> { +) -> Result<(lapdev_db_entities::workspace_host::Model, CpuCore)> { let workspace_host = decide_workspace_host( txn, prebuild_workspace_host, @@ -97,15 +97,15 @@ async fn decide_workspace_host( memory: usize, disk: usize, region: String, -) -> Result { +) -> Result { // workspace_spreaded controls if Lapdev prefers to pick workspace host // that's busy or idle. When it's true, Lapdev prefers idle server. 
let workspace_spreaded = true; let cpu_column = if !shared { - entities::workspace_host::Column::AvailableDedicatedCpu + lapdev_db_entities::workspace_host::Column::AvailableDedicatedCpu } else { - entities::workspace_host::Column::AvailableSharedCpu + lapdev_db_entities::workspace_host::Column::AvailableSharedCpu }; let order = if workspace_spreaded { @@ -114,20 +114,21 @@ async fn decide_workspace_host( sea_orm::Order::Asc }; - let query = entities::workspace_host::Entity::find() - .filter(entities::workspace_host::Column::Region.eq(region)) - .filter(entities::workspace_host::Column::DeletedAt.is_null()) + let query = lapdev_db_entities::workspace_host::Entity::find() + .filter(lapdev_db_entities::workspace_host::Column::Region.eq(region)) + .filter(lapdev_db_entities::workspace_host::Column::DeletedAt.is_null()) .filter( - entities::workspace_host::Column::Status.eq(WorkspaceHostStatus::Active.to_string()), + lapdev_db_entities::workspace_host::Column::Status + .eq(WorkspaceHostStatus::Active.to_string()), ) - .filter(entities::workspace_host::Column::AvailableMemory.gte(memory as i32)) - .filter(entities::workspace_host::Column::AvailableDisk.gte(disk as i32)) + .filter(lapdev_db_entities::workspace_host::Column::AvailableMemory.gte(memory as i32)) + .filter(lapdev_db_entities::workspace_host::Column::AvailableDisk.gte(disk as i32)) .filter(cpu_column.gte(cpu as i32)); if let Some(id) = prebuild_workspace_host { if let Some(host) = query .clone() - .filter(entities::workspace_host::Column::Id.eq(id)) + .filter(lapdev_db_entities::workspace_host::Column::Id.eq(id)) .lock_exclusive() .one(txn) .await? 
@@ -141,14 +142,14 @@ async fn decide_workspace_host( .clone() .order_by(cpu_column, order.clone()) .order_by( - entities::workspace_host::Column::AvailableMemory, + lapdev_db_entities::workspace_host::Column::AvailableMemory, order.clone(), ) .order_by( - entities::workspace_host::Column::AvailableDisk, + lapdev_db_entities::workspace_host::Column::AvailableDisk, order.clone(), ) - .filter(entities::workspace_host::Column::Id.is_in(prebuild_replica_hosts)) + .filter(lapdev_db_entities::workspace_host::Column::Id.is_in(prebuild_replica_hosts)) .limit(1) .lock_exclusive() .one(txn) @@ -162,11 +163,11 @@ async fn decide_workspace_host( .clone() .order_by(cpu_column, order.clone()) .order_by( - entities::workspace_host::Column::AvailableMemory, + lapdev_db_entities::workspace_host::Column::AvailableMemory, order.clone(), ) .order_by( - entities::workspace_host::Column::AvailableDisk, + lapdev_db_entities::workspace_host::Column::AvailableDisk, order.clone(), ) .limit(1) @@ -180,15 +181,15 @@ async fn decide_workspace_host( pub async fn recalcuate_workspce_host( txn: &DatabaseTransaction, - host: &entities::workspace_host::Model, + host: &lapdev_db_entities::workspace_host::Model, cpu_overcommit: usize, -) -> Result { +) -> Result { let existing = get_existing_resource(txn, host).await?; let available_core = available_cores(host.cpu as usize, cpu_overcommit, &existing.cores); let available_memory = host.memory - existing.memory as i32; let available_disk = host.disk - existing.disk as i32; - let model = entities::workspace_host::ActiveModel { + let model = lapdev_db_entities::workspace_host::ActiveModel { id: ActiveValue::Set(host.id), available_shared_cpu: ActiveValue::Set(available_core.available.len() as i32), available_dedicated_cpu: ActiveValue::Set(available_core.dedicated().len() as i32), @@ -204,15 +205,15 @@ pub async fn recalcuate_workspce_host( async fn get_existing_resource( txn: &DatabaseTransaction, - host: &entities::workspace_host::Model, + host: 
&lapdev_db_entities::workspace_host::Model, ) -> Result { let mut existing = ExistingResource::default(); - let models = entities::workspace::Entity::find() - .find_also_related(entities::machine_type::Entity) - .filter(entities::workspace::Column::HostId.eq(host.id)) - .filter(entities::workspace::Column::DeletedAt.is_null()) - .filter(entities::workspace::Column::ComposeParent.is_null()) + let models = lapdev_db_entities::workspace::Entity::find() + .find_also_related(lapdev_db_entities::machine_type::Entity) + .filter(lapdev_db_entities::workspace::Column::HostId.eq(host.id)) + .filter(lapdev_db_entities::workspace::Column::DeletedAt.is_null()) + .filter(lapdev_db_entities::workspace::Column::ComposeParent.is_null()) .all(txn) .await?; @@ -228,12 +229,14 @@ async fn get_existing_resource( } } - let result = entities::prebuild::Entity::find() + let result = lapdev_db_entities::prebuild::Entity::find() .find_also_linked(links::PrebuildToMachineType) - .filter(entities::prebuild::Column::HostId.eq(host.id)) - .filter(entities::prebuild::Column::Status.eq(PrebuildStatus::Building.to_string())) - .filter(entities::prebuild::Column::DeletedAt.is_null()) - .filter(entities::prebuild::Column::ByWorkspace.eq(false)) + .filter(lapdev_db_entities::prebuild::Column::HostId.eq(host.id)) + .filter( + lapdev_db_entities::prebuild::Column::Status.eq(PrebuildStatus::Building.to_string()), + ) + .filter(lapdev_db_entities::prebuild::Column::DeletedAt.is_null()) + .filter(lapdev_db_entities::prebuild::Column::ByWorkspace.eq(false)) .all(txn) .await?; for (prebuild, machine_type) in result { @@ -296,8 +299,7 @@ mod tests { use std::collections::HashSet; use chrono::Utc; - use lapdev_common::{utils, PrebuildStatus, WorkspaceHostStatus, WorkspaceStatus}; - use lapdev_db::{entities, tests::prepare_db}; + use lapdev_db::tests::prepare_db; use sea_orm::{ActiveModelTrait, ActiveValue, TransactionTrait}; use uuid::Uuid; @@ -357,128 +359,128 @@ mod tests { assert_eq!(cores.available.len(), 
0); } - #[tokio::test] - async fn test_recalcuate_workspce_host() { - let db = prepare_db().await.unwrap(); - let txn = db.conn.begin().await.unwrap(); - - let machine_type = entities::machine_type::ActiveModel { - id: ActiveValue::Set(Uuid::new_v4()), - name: ActiveValue::Set("test".to_string()), - shared: ActiveValue::Set(true), - cpu: ActiveValue::Set(2), - memory: ActiveValue::Set(8), - disk: ActiveValue::Set(10), - cost_per_second: ActiveValue::Set(1), - ..Default::default() - } - .insert(&txn) - .await - .unwrap(); - - let workspace_host = entities::workspace_host::ActiveModel { - id: ActiveValue::Set(Uuid::new_v4()), - cpu: ActiveValue::Set(12), - memory: ActiveValue::Set(64), - disk: ActiveValue::Set(1000), - status: ActiveValue::Set(WorkspaceHostStatus::Active.to_string()), - host: ActiveValue::Set("127.0.0.1".to_string()), - port: ActiveValue::Set(6123), - inter_port: ActiveValue::Set(6122), - available_dedicated_cpu: ActiveValue::Set(0), - available_shared_cpu: ActiveValue::Set(0), - available_memory: ActiveValue::Set(0), - available_disk: ActiveValue::Set(0), - region: ActiveValue::Set("".to_string()), - zone: ActiveValue::Set("".to_string()), - ..Default::default() - } - .insert(&txn) - .await - .unwrap(); - - entities::workspace::ActiveModel { - id: ActiveValue::Set(Uuid::new_v4()), - status: ActiveValue::Set(WorkspaceStatus::Running.to_string()), - name: ActiveValue::Set(utils::rand_string(10)), - created_at: ActiveValue::Set(Utc::now().into()), - repo_url: ActiveValue::Set("".to_string()), - repo_name: ActiveValue::Set("".to_string()), - branch: ActiveValue::Set("".to_string()), - commit: ActiveValue::Set("".to_string()), - organization_id: ActiveValue::Set(Uuid::new_v4()), - user_id: ActiveValue::Set(Uuid::new_v4()), - project_id: ActiveValue::Set(None), - host_id: ActiveValue::Set(workspace_host.id), - osuser: ActiveValue::Set("".to_string()), - ssh_private_key: ActiveValue::Set("".to_string()), - ssh_public_key: ActiveValue::Set("".to_string()), 
- cores: ActiveValue::Set(serde_json::to_string(&[1, 2]).unwrap()), - ssh_port: ActiveValue::Set(None), - ide_port: ActiveValue::Set(None), - prebuild_id: ActiveValue::Set(None), - service: ActiveValue::Set(None), - usage_id: ActiveValue::Set(None), - machine_type_id: ActiveValue::Set(machine_type.id), - auto_start: ActiveValue::Set(true), - is_compose: ActiveValue::Set(false), - compose_parent: ActiveValue::Set(None), - auto_stop: ActiveValue::Set(None), - build_output: ActiveValue::Set(None), - updated_at: ActiveValue::Set(None), - deleted_at: ActiveValue::Set(None), - env: ActiveValue::Set(None), - last_inactivity: ActiveValue::Set(None), - pinned: ActiveValue::Set(false), - } - .insert(&txn) - .await - .unwrap(); - - let project = entities::project::ActiveModel { - id: ActiveValue::Set(Uuid::new_v4()), - name: ActiveValue::Set(utils::rand_string(10)), - created_at: ActiveValue::Set(Utc::now().into()), - created_by: ActiveValue::Set(Uuid::new_v4()), - repo_url: ActiveValue::Set("".to_string()), - repo_name: ActiveValue::Set("".to_string()), - oauth_id: ActiveValue::Set(Uuid::new_v4()), - host_id: ActiveValue::Set(Uuid::new_v4()), - osuser: ActiveValue::Set("".to_string()), - organization_id: ActiveValue::Set(Uuid::new_v4()), - machine_type_id: ActiveValue::Set(machine_type.id), - ..Default::default() - } - .insert(&txn) - .await - .unwrap(); - - entities::prebuild::ActiveModel { - id: ActiveValue::Set(Uuid::new_v4()), - status: ActiveValue::Set(PrebuildStatus::Building.to_string()), - created_at: ActiveValue::Set(Utc::now().into()), - branch: ActiveValue::Set("".to_string()), - commit: ActiveValue::Set("".to_string()), - project_id: ActiveValue::Set(project.id), - host_id: ActiveValue::Set(workspace_host.id), - osuser: ActiveValue::Set("".to_string()), - cores: ActiveValue::Set(serde_json::to_string(&[2, 3]).unwrap()), - by_workspace: ActiveValue::Set(false), - ..Default::default() - } - .insert(&txn) - .await - .unwrap(); - - let workspace_host = 
scheduler::recalcuate_workspce_host(&txn, &workspace_host, 4) - .await - .unwrap(); - - txn.commit().await.unwrap(); - - assert_eq!(workspace_host.available_dedicated_cpu, 9); - assert_eq!(workspace_host.available_shared_cpu, 9); - assert_eq!(workspace_host.available_memory, 48); - assert_eq!(workspace_host.available_disk, 980); - } + // #[tokio::test] + // async fn test_recalcuate_workspce_host() { + // let db = prepare_db().await.unwrap(); + // let txn = db.conn.begin().await.unwrap(); + + // let machine_type = lapdev_db_entities::machine_type::ActiveModel { + // id: ActiveValue::Set(Uuid::new_v4()), + // name: ActiveValue::Set("test".to_string()), + // shared: ActiveValue::Set(true), + // cpu: ActiveValue::Set(2), + // memory: ActiveValue::Set(8), + // disk: ActiveValue::Set(10), + // cost_per_second: ActiveValue::Set(1), + // ..Default::default() + // } + // .insert(&txn) + // .await + // .unwrap(); + + // let workspace_host = lapdev_db_entities::workspace_host::ActiveModel { + // id: ActiveValue::Set(Uuid::new_v4()), + // cpu: ActiveValue::Set(12), + // memory: ActiveValue::Set(64), + // disk: ActiveValue::Set(1000), + // status: ActiveValue::Set(WorkspaceHostStatus::Active.to_string()), + // host: ActiveValue::Set("127.0.0.1".to_string()), + // port: ActiveValue::Set(6123), + // inter_port: ActiveValue::Set(6122), + // available_dedicated_cpu: ActiveValue::Set(0), + // available_shared_cpu: ActiveValue::Set(0), + // available_memory: ActiveValue::Set(0), + // available_disk: ActiveValue::Set(0), + // region: ActiveValue::Set("".to_string()), + // zone: ActiveValue::Set("".to_string()), + // ..Default::default() + // } + // .insert(&txn) + // .await + // .unwrap(); + + // lapdev_db_entities::workspace::ActiveModel { + // id: ActiveValue::Set(Uuid::new_v4()), + // status: ActiveValue::Set(WorkspaceStatus::Running.to_string()), + // name: ActiveValue::Set(utils::rand_string(10)), + // created_at: ActiveValue::Set(Utc::now().into()), + // repo_url: 
ActiveValue::Set("".to_string()), + // repo_name: ActiveValue::Set("".to_string()), + // branch: ActiveValue::Set("".to_string()), + // commit: ActiveValue::Set("".to_string()), + // organization_id: ActiveValue::Set(Uuid::new_v4()), + // user_id: ActiveValue::Set(Uuid::new_v4()), + // project_id: ActiveValue::Set(None), + // host_id: ActiveValue::Set(workspace_host.id), + // osuser: ActiveValue::Set("".to_string()), + // ssh_private_key: ActiveValue::Set("".to_string()), + // ssh_public_key: ActiveValue::Set("".to_string()), + // cores: ActiveValue::Set(serde_json::to_string(&[1, 2]).unwrap()), + // ssh_port: ActiveValue::Set(None), + // ide_port: ActiveValue::Set(None), + // prebuild_id: ActiveValue::Set(None), + // service: ActiveValue::Set(None), + // usage_id: ActiveValue::Set(None), + // machine_type_id: ActiveValue::Set(machine_type.id), + // auto_start: ActiveValue::Set(true), + // is_compose: ActiveValue::Set(false), + // compose_parent: ActiveValue::Set(None), + // auto_stop: ActiveValue::Set(None), + // build_output: ActiveValue::Set(None), + // updated_at: ActiveValue::Set(None), + // deleted_at: ActiveValue::Set(None), + // env: ActiveValue::Set(None), + // last_inactivity: ActiveValue::Set(None), + // pinned: ActiveValue::Set(false), + // } + // .insert(&txn) + // .await + // .unwrap(); + + // let project = lapdev_db_entities::project::ActiveModel { + // id: ActiveValue::Set(Uuid::new_v4()), + // name: ActiveValue::Set(utils::rand_string(10)), + // created_at: ActiveValue::Set(Utc::now().into()), + // created_by: ActiveValue::Set(Uuid::new_v4()), + // repo_url: ActiveValue::Set("".to_string()), + // repo_name: ActiveValue::Set("".to_string()), + // oauth_id: ActiveValue::Set(Uuid::new_v4()), + // host_id: ActiveValue::Set(Uuid::new_v4()), + // osuser: ActiveValue::Set("".to_string()), + // organization_id: ActiveValue::Set(Uuid::new_v4()), + // machine_type_id: ActiveValue::Set(machine_type.id), + // ..Default::default() + // } + // .insert(&txn) + // 
.await + // .unwrap(); + + // lapdev_db_entities::prebuild::ActiveModel { + // id: ActiveValue::Set(Uuid::new_v4()), + // status: ActiveValue::Set(PrebuildStatus::Building.to_string()), + // created_at: ActiveValue::Set(Utc::now().into()), + // branch: ActiveValue::Set("".to_string()), + // commit: ActiveValue::Set("".to_string()), + // project_id: ActiveValue::Set(project.id), + // host_id: ActiveValue::Set(workspace_host.id), + // osuser: ActiveValue::Set("".to_string()), + // cores: ActiveValue::Set(serde_json::to_string(&[2, 3]).unwrap()), + // by_workspace: ActiveValue::Set(false), + // ..Default::default() + // } + // .insert(&txn) + // .await + // .unwrap(); + + // let workspace_host = scheduler::recalcuate_workspce_host(&txn, &workspace_host, 4) + // .await + // .unwrap(); + + // txn.commit().await.unwrap(); + + // assert_eq!(workspace_host.available_dedicated_cpu, 9); + // assert_eq!(workspace_host.available_shared_cpu, 9); + // assert_eq!(workspace_host.available_memory, 48); + // assert_eq!(workspace_host.available_disk, 980); + // } } diff --git a/lapdev-conductor/src/server.rs b/crates/conductor/src/server.rs similarity index 80% rename from lapdev-conductor/src/server.rs rename to crates/conductor/src/server.rs index 8bbac75..1591703 100644 --- a/lapdev-conductor/src/server.rs +++ b/crates/conductor/src/server.rs @@ -13,12 +13,10 @@ use lapdev_common::{ ProjectRequest, RepoBuildInfo, RepoBuildOutput, RepoSource, StartWorkspaceRequest, StopWorkspaceRequest, UsageResourceKind, WorkspaceStatus, WorkspaceUpdateEvent, }; +use lapdev_db::api::DbApi; use lapdev_db::api::LAPDEV_MAX_CPU_ERROR; -use lapdev_db::{api::DbApi, entities}; use lapdev_enterprise::enterprise::Enterprise; -use lapdev_rpc::{ - error::ApiError, long_running_context, spawn_twoway, ConductorService, WorkspaceServiceClient, -}; +use lapdev_rpc::{error::ApiError, long_running_context, WorkspaceServiceClient}; use russh::keys::ssh_key::rand_core::OsRng; use russh::keys::{pkcs8, Algorithm, 
HashAlg, PrivateKey, PublicKeyBase64}; use sea_orm::{ @@ -26,18 +24,11 @@ use sea_orm::{ TransactionTrait, }; use serde::Deserialize; -use sqlx::postgres::PgNotification; -use tarpc::{ - context, - server::{BaseChannel, Channel}, -}; +use tarpc::context; use tokio::sync::{Mutex, RwLock}; use uuid::Uuid; -use crate::{ - rpc::ConductorRpc, - scheduler::{self, LAPDEV_CPU_OVERCOMMIT}, -}; +use crate::scheduler::{self, LAPDEV_CPU_OVERCOMMIT}; #[derive(Clone, Default)] pub struct WorkspaceUpdate { @@ -74,7 +65,7 @@ struct StatusUpdatePayload { } struct WorkspaceHostInfo { - model: entities::workspace_host::Model, + model: lapdev_db_entities::workspace_host::Model, latency: Option, } @@ -84,7 +75,7 @@ pub struct RepoDetails { pub name: String, pub branch: String, pub commit: String, - pub project: Option, + pub project: Option, pub auth: (String, String), // head branch name pub head: String, @@ -94,10 +85,10 @@ pub struct RepoDetails { #[derive(Clone)] pub struct Conductor { - version: String, - rpc_aborts: Arc>>, + pub version: String, + pub rpc_aborts: Arc>>, rpcs: Arc>>, - ws_hosts: Arc>>, + pub ws_hosts: Arc>>, region: Arc>, data_folder: PathBuf, pub hostnames: Arc>>, @@ -156,414 +147,414 @@ impl Conductor { db, }; - { - let conductor = conductor.clone(); - tokio::spawn(async move { - conductor.monitor_workspace_hosts().await; - }); - } - - { - let conductor = conductor.clone(); - tokio::spawn(async move { - if let Err(e) = conductor.monitor_status_updates().await { - tracing::error!("conductor monitor status updates error: {e}"); - } - }); - } - - { - let conductor = conductor.clone(); - tokio::spawn(async move { - conductor.monitor_auto_start_stop().await; - }); - } + // { + // let conductor = conductor.clone(); + // tokio::spawn(async move { + // conductor.monitor_workspace_hosts().await; + // }); + // } + + // { + // let conductor = conductor.clone(); + // tokio::spawn(async move { + // if let Err(e) = conductor.monitor_status_updates().await { + // 
tracing::error!("conductor monitor status updates error: {e}"); + // } + // }); + // } + + // { + // let conductor = conductor.clone(); + // tokio::spawn(async move { + // conductor.monitor_auto_start_stop().await; + // }); + // } Ok(conductor) } - async fn listen_table_update(&self) -> Result<()> { - let pool = self - .db - .pool - .clone() - .ok_or_else(|| anyhow!("db doesn't have pg pool"))?; - let mut listener = sqlx::postgres::PgListener::connect_with(&pool).await?; - listener.listen("table_update").await?; - loop { - let notification = listener.recv().await?; - let _ = self - .handle_workspace_host_update_notification(notification) - .await; - } - } - - async fn handle_workspace_host_update_notification( - &self, - notification: PgNotification, - ) -> Result<()> { - let payload: TableUpdatePayload = serde_json::from_str(notification.payload())?; - let workspace_host = self - .db - .get_workspace_host(payload.id) - .await? - .ok_or_else(|| anyhow!("can't find workspace host"))?; - - if workspace_host.deleted_at.is_some() { - // the workspace host was deleted - self.ws_hosts.write().await.remove(&workspace_host.id); - self.rpcs.write().await.remove(&workspace_host.id); - if let Some(abort) = self.rpc_aborts.write().await.remove(&workspace_host.id) { - abort.abort(); - } - } else { - let mut ws_hosts = self.ws_hosts.write().await; - if let Some(info) = ws_hosts.get_mut(&workspace_host.id) { - info.model = workspace_host; - } else { - let id = workspace_host.id; - let host = workspace_host.host.clone(); - let port = workspace_host.port as u16; - ws_hosts.insert( - id, - WorkspaceHostInfo { - model: workspace_host, - latency: None, - }, - ); - let conductor = self.clone(); - tokio::spawn(async move { - conductor.connect_workspace_host(id, host, port).await; - }); - } - } - - self.decide_current_region().await; - - Ok(()) - } - - async fn monitor_workspace_hosts(&self) { - { - let conductor = self.clone(); - tokio::spawn(async move { - if let Err(e) = 
conductor.listen_table_update().await { - println!("listen table update error: {e}"); - } else { - println!("listen table update exited"); - } - }); - } - - { - let mut ws_hosts = self.ws_hosts.write().await; - let hosts = self.get_workspace_hosts().await; - - for workspace_host in hosts { - if !ws_hosts.contains_key(&workspace_host.id) { - let id = workspace_host.id; - let host = workspace_host.host.clone(); - let port = workspace_host.port as u16; - ws_hosts.insert( - id, - WorkspaceHostInfo { - model: workspace_host, - latency: None, - }, - ); - let conductor = self.clone(); - tokio::spawn(async move { - conductor.connect_workspace_host(id, host, port).await; - }); - } - } - } - - { - let mut tick = tokio::time::interval(Duration::from_secs(6)); - loop { - tick.tick().await; - let rpcs = { self.rpcs.read().await.clone() }; - for (_, rpc) in rpcs { - tokio::spawn(async move { - let _ = rpc.ping(context::current()).await; - }); - } - } - } - } - - async fn monitor_status_updates(&self) -> Result<()> { - let pool = self - .db - .pool - .clone() - .ok_or_else(|| anyhow!("db doesn't have pg pool"))?; - let mut listener = sqlx::postgres::PgListener::connect_with(&pool).await?; - listener.listen("status_update").await?; - loop { - let notification = listener.recv().await?; - if let Err(e) = self.handle_status_update_notification(notification).await { - tracing::error!("handle status update notification error: {e:#}"); - } - } - } - - async fn handle_status_update_notification(&self, notification: PgNotification) -> Result<()> { - tracing::debug!( - "status update notification payload: {}", - notification.payload() - ); - let payload: StatusUpdatePayload = serde_json::from_str(notification.payload()) - .with_context(|| format!("trying to deserialize payload {}", notification.payload()))?; - match payload.table.as_str() { - "workspace" => { - let status = WorkspaceStatus::from_str(&payload.status).with_context(|| { - format!("trying to deserialize workspace status {}", 
payload.status) - })?; - self.add_workspace_update_event( - payload.user_id, - payload.id, - WorkspaceUpdateEvent::Status(status), - ) - .await; - } - "prebuild" => { - let status = PrebuildStatus::from_str(&payload.status).with_context(|| { - format!("trying to deserialize prebuild status {}", payload.status) - })?; - self.add_prebuild_update_event(payload.id, PrebuildUpdateEvent::Status(status)) - .await; - } - "prebuild_replica" => { - let status = - PrebuildReplicaStatus::from_str(&payload.status).with_context(|| { - format!( - "trying to deserialize prebuild replica status {}", - payload.status - ) - })?; - self.add_prebuild_replica_update_event(payload.id, status) - .await; - } - _ => { - return Err(anyhow!("status update table {} not handled", payload.table)); - } - } - Ok(()) - } - - async fn get_workspace_hosts(&self) -> Vec { - entities::workspace_host::Entity::find() - .filter(entities::workspace_host::Column::DeletedAt.is_null()) - .all(&self.db.conn) - .await - .unwrap_or_default() - } - - async fn connect_workspace_host(&self, id: Uuid, host: String, port: u16) { - loop { - { - if !self.ws_hosts.read().await.contains_key(&id) { - // this means the workspace host server is removed, - // so we don't connect to it anymore. 
- return; - } - } - - if let Err(e) = self.connect_workspace_host_once(id, &host, port).await { - tracing::error!("connect workspace host {host}:{port} failed: {e:?}"); - } - - let _ = entities::workspace_host::ActiveModel { - id: ActiveValue::Set(id), - status: ActiveValue::Set(WorkspaceHostStatus::Inactive.to_string()), - ..Default::default() - } - .update(&self.db.conn) - .await; - - { - self.rpcs.write().await.remove(&id); - self.rpc_aborts.write().await.remove(&id); - } - - let _ = tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; - } - } - - async fn decide_current_region(&self) { - if !self.enterprise.has_valid_license().await { - return; - } - - let mut region_latencies = HashMap::new(); - for (_, ws_host) in self.ws_hosts.read().await.iter() { - if let Some(latency) = ws_host.latency { - if let Some(existing) = region_latencies.get_mut(&ws_host.model.region) { - if latency < *existing { - *existing = latency; - } - } else { - region_latencies.insert(ws_host.model.region.clone(), latency); - } - } - } - - if let Some((region, _)) = region_latencies.iter().min_by_key(|(_, l)| **l) { - let mut current_region = self.region.write().await; - if &*current_region != region { - tracing::info!("change current region from {current_region:?} to {region:?}"); - *current_region = region.to_owned(); - } - } - } - - async fn connect_workspace_host_once(&self, id: Uuid, host: &str, port: u16) -> Result<()> { - tracing::debug!("start to connect to workspace host {host}:{port}"); - let conn = tarpc::serde_transport::tcp::connect( - (host, port), - tarpc::tokio_serde::formats::Json::default, - ) - .await?; - let (server_chan, client_chan, abort_handle) = spawn_twoway(conn); - let ws_client = - WorkspaceServiceClient::new(tarpc::client::Config::default(), client_chan).spawn(); - { - self.rpcs.write().await.insert(id, ws_client.clone()); - self.rpc_aborts - .write() - .await - .insert(id, abort_handle.clone()); - } - - { - let ws_client = ws_client.clone(); - let 
conductor = self.clone(); - tokio::spawn(async move { - let start = std::time::Instant::now(); - if let Ok(pong) = ws_client.ping(context::current()).await { - if pong == "pong" { - let latency = start.elapsed().as_millis(); - { - if let Some(info) = conductor.ws_hosts.write().await.get_mut(&id) { - info.latency = Some(latency); - } - } - - conductor.decide_current_region().await; - } - } - }); - } - - let rpc = ConductorRpc { - ws_host_id: id, - conductor: self.clone(), - }; - - let rpc_future = tokio::spawn( - BaseChannel::with_defaults(server_chan) - .execute(rpc.serve()) - .for_each(|resp| async move { - tokio::spawn(resp); - }), - ); - - let ws_version = ws_client.version(context::current()).await; - if ws_version.as_deref().unwrap_or_default() != self.version { - abort_handle.abort(); - let ws_host = self - .db - .get_workspace_host(id) - .await? - .ok_or_else(|| anyhow!("can't find workspace host in db"))?; - if ws_host.status != WorkspaceHostStatus::VersionMismatch.to_string() { - entities::workspace_host::ActiveModel { - id: ActiveValue::Set(id), - status: ActiveValue::Set(WorkspaceHostStatus::VersionMismatch.to_string()), - ..Default::default() - } - .update(&self.db.conn) - .await?; - tracing::error!( - "version mismatch on lapdev ({}), and lapdev-ws ({ws_version:?})", - self.version - ); - } - } else { - entities::workspace_host::ActiveModel { - id: ActiveValue::Set(id), - status: ActiveValue::Set(WorkspaceHostStatus::Active.to_string()), - ..Default::default() - } - .update(&self.db.conn) - .await?; - - let _ = rpc_future.await; - tracing::debug!("workspace host connection ended"); - abort_handle.abort(); - } - - Ok(()) - } - - async fn monitor_auto_start_stop(&self) { - loop { - let orgs = if self.enterprise.license.has_valid().await { - self.enterprise - .auto_start_stop - .get_organization_auto_stop() - .await - .unwrap_or_default() - } else { - vec![] - }; - - if orgs.is_empty() { - tokio::time::sleep(Duration::from_secs(60)).await; - } else { - 
for org in orgs { - { - let workspaces = self - .enterprise - .auto_start_stop - .organization_auto_stop_workspaces(&org) - .await - .unwrap_or_default(); - for workspace in workspaces { - tracing::info!( - "stop workspace {} because of auto stop timeout", - workspace.name - ); - let _ = self.stop_workspace(workspace, None, None).await; - } - } - - if org.running_workspace_limit > 0 { - let usage = self - .enterprise - .usage - .get_monthly_cost(org.id, None, None, Utc::now().into(), None) - .await - .unwrap_or(0); - if usage as i64 >= org.usage_limit { - if let Ok(workspaces) = self.db.get_org_running_workspaces(org.id).await - { - for workspace in workspaces { - tracing::info!( - "stop workspace {} because of usage limit", - workspace.name - ); - let _ = self.stop_workspace(workspace, None, None).await; - } - } - } - } - } - } - } - } + // async fn listen_table_update(&self) -> Result<()> { + // let pool = self + // .db + // .pool + // .clone() + // .ok_or_else(|| anyhow!("db doesn't have pg pool"))?; + // let mut listener = sqlx::postgres::PgListener::connect_with(&pool).await?; + // listener.listen("table_update").await?; + // loop { + // let notification = listener.recv().await?; + // let _ = self + // .handle_workspace_host_update_notification(notification) + // .await; + // } + // } + + // async fn handle_workspace_host_update_notification( + // &self, + // notification: PgNotification, + // ) -> Result<()> { + // let payload: TableUpdatePayload = serde_json::from_str(notification.payload())?; + // let workspace_host = self + // .db + // .get_workspace_host(payload.id) + // .await? 
+ // .ok_or_else(|| anyhow!("can't find workspace host"))?; + + // if workspace_host.deleted_at.is_some() { + // // the workspace host was deleted + // self.ws_hosts.write().await.remove(&workspace_host.id); + // self.rpcs.write().await.remove(&workspace_host.id); + // if let Some(abort) = self.rpc_aborts.write().await.remove(&workspace_host.id) { + // abort.abort(); + // } + // } else { + // let mut ws_hosts = self.ws_hosts.write().await; + // if let Some(info) = ws_hosts.get_mut(&workspace_host.id) { + // info.model = workspace_host; + // } else { + // let id = workspace_host.id; + // let host = workspace_host.host.clone(); + // let port = workspace_host.port as u16; + // ws_hosts.insert( + // id, + // WorkspaceHostInfo { + // model: workspace_host, + // latency: None, + // }, + // ); + // let conductor = self.clone(); + // tokio::spawn(async move { + // conductor.connect_workspace_host(id, host, port).await; + // }); + // } + // } + + // self.decide_current_region().await; + + // Ok(()) + // } + + // async fn monitor_workspace_hosts(&self) { + // { + // let conductor = self.clone(); + // tokio::spawn(async move { + // if let Err(e) = conductor.listen_table_update().await { + // tracing::error!("listen table update error: {e}"); + // } else { + // tracing::info!("listen table update exited"); + // } + // }); + // } + + // { + // let mut ws_hosts = self.ws_hosts.write().await; + // let hosts = self.get_workspace_hosts().await; + + // for workspace_host in hosts { + // if !ws_hosts.contains_key(&workspace_host.id) { + // let id = workspace_host.id; + // let host = workspace_host.host.clone(); + // let port = workspace_host.port as u16; + // ws_hosts.insert( + // id, + // WorkspaceHostInfo { + // model: workspace_host, + // latency: None, + // }, + // ); + // let conductor = self.clone(); + // tokio::spawn(async move { + // conductor.connect_workspace_host(id, host, port).await; + // }); + // } + // } + // } + + // { + // let mut tick = 
tokio::time::interval(Duration::from_secs(6)); + // loop { + // tick.tick().await; + // let rpcs = { self.rpcs.read().await.clone() }; + // for (_, rpc) in rpcs { + // tokio::spawn(async move { + // let _ = rpc.ping(context::current()).await; + // }); + // } + // } + // } + // } + + // async fn monitor_status_updates(&self) -> Result<()> { + // let pool = self + // .db + // .pool + // .clone() + // .ok_or_else(|| anyhow!("db doesn't have pg pool"))?; + // let mut listener = sqlx::postgres::PgListener::connect_with(&pool).await?; + // listener.listen("status_update").await?; + // loop { + // let notification = listener.recv().await?; + // if let Err(e) = self.handle_status_update_notification(notification).await { + // tracing::error!("handle status update notification error: {e:#}"); + // } + // } + // } + + // async fn handle_status_update_notification(&self, notification: PgNotification) -> Result<()> { + // tracing::debug!( + // "status update notification payload: {}", + // notification.payload() + // ); + // let payload: StatusUpdatePayload = serde_json::from_str(notification.payload()) + // .with_context(|| format!("trying to deserialize payload {}", notification.payload()))?; + // match payload.table.as_str() { + // "workspace" => { + // let status = WorkspaceStatus::from_str(&payload.status).with_context(|| { + // format!("trying to deserialize workspace status {}", payload.status) + // })?; + // self.add_workspace_update_event( + // payload.user_id, + // payload.id, + // WorkspaceUpdateEvent::Status(status), + // ) + // .await; + // } + // "prebuild" => { + // let status = PrebuildStatus::from_str(&payload.status).with_context(|| { + // format!("trying to deserialize prebuild status {}", payload.status) + // })?; + // self.add_prebuild_update_event(payload.id, PrebuildUpdateEvent::Status(status)) + // .await; + // } + // "prebuild_replica" => { + // let status = + // PrebuildReplicaStatus::from_str(&payload.status).with_context(|| { + // format!( + // 
"trying to deserialize prebuild replica status {}", + // payload.status + // ) + // })?; + // self.add_prebuild_replica_update_event(payload.id, status) + // .await; + // } + // _ => { + // return Err(anyhow!("status update table {} not handled", payload.table)); + // } + // } + // Ok(()) + // } + + // async fn get_workspace_hosts(&self) -> Vec { + // lapdev_db_entities::workspace_host::Entity::find() + // .filter(lapdev_db_entities::workspace_host::Column::DeletedAt.is_null()) + // .all(&self.db.conn) + // .await + // .unwrap_or_default() + // } + + // async fn connect_workspace_host(&self, id: Uuid, host: String, port: u16) { + // loop { + // { + // if !self.ws_hosts.read().await.contains_key(&id) { + // // this means the workspace host server is removed, + // // so we don't connect to it anymore. + // return; + // } + // } + + // if let Err(e) = self.connect_workspace_host_once(id, &host, port).await { + // tracing::error!("connect workspace host {host}:{port} failed: {e:?}"); + // } + + // let _ = lapdev_db_entities::workspace_host::ActiveModel { + // id: ActiveValue::Set(id), + // status: ActiveValue::Set(WorkspaceHostStatus::Inactive.to_string()), + // ..Default::default() + // } + // .update(&self.db.conn) + // .await; + + // { + // self.rpcs.write().await.remove(&id); + // self.rpc_aborts.write().await.remove(&id); + // } + + // let _ = tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; + // } + // } + + // async fn decide_current_region(&self) { + // if !self.enterprise.has_valid_license().await { + // return; + // } + + // let mut region_latencies = HashMap::new(); + // for (_, ws_host) in self.ws_hosts.read().await.iter() { + // if let Some(latency) = ws_host.latency { + // if let Some(existing) = region_latencies.get_mut(&ws_host.model.region) { + // if latency < *existing { + // *existing = latency; + // } + // } else { + // region_latencies.insert(ws_host.model.region.clone(), latency); + // } + // } + // } + + // if let Some((region, _)) 
= region_latencies.iter().min_by_key(|(_, l)| **l) { + // let mut current_region = self.region.write().await; + // if &*current_region != region { + // tracing::info!("change current region from {current_region:?} to {region:?}"); + // *current_region = region.to_owned(); + // } + // } + // } + + // async fn connect_workspace_host_once(&self, id: Uuid, host: &str, port: u16) -> Result<()> { + // tracing::debug!("start to connect to workspace host {host}:{port}"); + // let conn = tarpc::serde_transport::tcp::connect( + // (host, port), + // tarpc::tokio_serde::formats::Json::default, + // ) + // .await?; + // let (server_chan, client_chan, abort_handle) = spawn_twoway(conn); + // let ws_client = + // WorkspaceServiceClient::new(tarpc::client::Config::default(), client_chan).spawn(); + // { + // self.rpcs.write().await.insert(id, ws_client.clone()); + // self.rpc_aborts + // .write() + // .await + // .insert(id, abort_handle.clone()); + // } + + // { + // let ws_client = ws_client.clone(); + // let conductor = self.clone(); + // tokio::spawn(async move { + // let start = std::time::Instant::now(); + // if let Ok(pong) = ws_client.ping(context::current()).await { + // if pong == "pong" { + // let latency = start.elapsed().as_millis(); + // { + // if let Some(info) = conductor.ws_hosts.write().await.get_mut(&id) { + // info.latency = Some(latency); + // } + // } + + // conductor.decide_current_region().await; + // } + // } + // }); + // } + + // let rpc = ConductorRpc { + // ws_host_id: id, + // conductor: self.clone(), + // }; + + // let rpc_future = tokio::spawn( + // BaseChannel::with_defaults(server_chan) + // .execute(rpc.serve()) + // .for_each(|resp| async move { + // tokio::spawn(resp); + // }), + // ); + + // let ws_version = ws_client.version(context::current()).await; + // if ws_version.as_deref().unwrap_or_default() != self.version { + // abort_handle.abort(); + // let ws_host = self + // .db + // .get_workspace_host(id) + // .await? 
+ // .ok_or_else(|| anyhow!("can't find workspace host in db"))?; + // if ws_host.status != WorkspaceHostStatus::VersionMismatch.to_string() { + // lapdev_db_entities::workspace_host::ActiveModel { + // id: ActiveValue::Set(id), + // status: ActiveValue::Set(WorkspaceHostStatus::VersionMismatch.to_string()), + // ..Default::default() + // } + // .update(&self.db.conn) + // .await?; + // tracing::error!( + // "version mismatch on lapdev ({}), and lapdev-ws ({ws_version:?})", + // self.version + // ); + // } + // } else { + // lapdev_db_entities::workspace_host::ActiveModel { + // id: ActiveValue::Set(id), + // status: ActiveValue::Set(WorkspaceHostStatus::Active.to_string()), + // ..Default::default() + // } + // .update(&self.db.conn) + // .await?; + + // let _ = rpc_future.await; + // tracing::debug!("workspace host connection ended"); + // abort_handle.abort(); + // } + + // Ok(()) + // } + + // async fn monitor_auto_start_stop(&self) { + // loop { + // let orgs = if self.enterprise.license.has_valid().await { + // self.enterprise + // .auto_start_stop + // .get_organization_auto_stop() + // .await + // .unwrap_or_default() + // } else { + // vec![] + // }; + + // if orgs.is_empty() { + // tokio::time::sleep(Duration::from_secs(60)).await; + // } else { + // for org in orgs { + // { + // let workspaces = self + // .enterprise + // .auto_start_stop + // .organization_auto_stop_workspaces(&org) + // .await + // .unwrap_or_default(); + // for workspace in workspaces { + // tracing::info!( + // "stop workspace {} because of auto stop timeout", + // workspace.name + // ); + // let _ = self.stop_workspace(workspace, None, None).await; + // } + // } + + // if org.running_workspace_limit > 0 { + // let usage = self + // .enterprise + // .usage + // .get_monthly_cost(org.id, None, None, Utc::now().into(), None) + // .await + // .unwrap_or(0); + // if usage as i64 >= org.usage_limit { + // if let Ok(workspaces) = self.db.get_org_running_workspaces(org.id).await + // { + // 
for workspace in workspaces { + // tracing::info!( + // "stop workspace {} because of usage limit", + // workspace.name + // ); + // let _ = self.stop_workspace(workspace, None, None).await; + // } + // } + // } + // } + // } + // } + // } + // } async fn get_raw_repo_details( &self, @@ -635,7 +626,7 @@ impl Conductor { let path = repo_url .split('/') - .last() + .next_back() .ok_or_else(|| ApiError::RepositoryInvalid("invalid repo path".to_string()))?; let path = path .split('?') @@ -657,9 +648,9 @@ impl Conductor { pub async fn find_match_oauth_for_repo( &self, - user: &entities::user::Model, + user: &lapdev_db_entities::user::Model, repo: &str, - ) -> Result { + ) -> Result { let oauths = self.db.get_user_all_oauth(user.id).await?; let repo = repo.to_lowercase(); @@ -686,7 +677,7 @@ impl Conductor { pub async fn create_project( &self, - user: entities::user::Model, + user: lapdev_db_entities::user::Model, org_id: Uuid, project: NewProject, ip: Option, @@ -759,7 +750,7 @@ impl Conductor { let txn = self.db.conn.begin().await?; let now = Utc::now(); - let project = entities::project::ActiveModel { + let project = lapdev_db_entities::project::ActiveModel { id: ActiveValue::Set(id), name: ActiveValue::Set(repo.name.clone()), created_at: ActiveValue::Set(now.into()), @@ -800,7 +791,7 @@ impl Conductor { pub async fn project_branches( &self, - project: &entities::project::Model, + project: &lapdev_db_entities::project::Model, auth: (String, String), ) -> Result, ApiError> { let ws_client = { self.rpcs.read().await.get(&project.host_id).cloned() } @@ -824,10 +815,10 @@ impl Conductor { &self, org_id: Uuid, user_id: Uuid, - project: &entities::project::Model, - prebuild: &entities::prebuild::Model, + project: &lapdev_db_entities::project::Model, + prebuild: &lapdev_db_entities::prebuild::Model, repo: RepoDetails, - machine_type: &entities::machine_type::Model, + machine_type: &lapdev_db_entities::machine_type::Model, ip: Option, user_agent: Option, ) -> Result { @@ 
-837,10 +828,10 @@ impl Conductor { let current_prebuild = prebuild.id; let branch = prebuild.branch.clone(); tokio::spawn(async move { - if let Ok(prebuilds) = entities::prebuild::Entity::find() - .filter(entities::prebuild::Column::DeletedAt.is_null()) - .filter(entities::prebuild::Column::ProjectId.eq(project.id)) - .filter(entities::prebuild::Column::Branch.eq(branch)) + if let Ok(prebuilds) = lapdev_db_entities::prebuild::Entity::find() + .filter(lapdev_db_entities::prebuild::Column::DeletedAt.is_null()) + .filter(lapdev_db_entities::prebuild::Column::ProjectId.eq(project.id)) + .filter(lapdev_db_entities::prebuild::Column::Branch.eq(branch)) .all(&conductor.db.conn) .await { @@ -909,13 +900,13 @@ impl Conductor { pub async fn create_project_prebuild( &self, - user: &entities::user::Model, - project: &entities::project::Model, - ws: Option<&entities::workspace::Model>, + user: &lapdev_db_entities::user::Model, + project: &lapdev_db_entities::project::Model, + ws: Option<&lapdev_db_entities::workspace::Model>, repo: &RepoDetails, ip: Option, user_agent: Option, - ) -> Result { + ) -> Result { let id = uuid::Uuid::new_v4(); let osuser = self.get_osuser(project.organization_id); @@ -976,7 +967,7 @@ impl Conductor { ) .await?; - let result = entities::prebuild::ActiveModel { + let result = lapdev_db_entities::prebuild::ActiveModel { id: ActiveValue::Set(id), deleted_at: ActiveValue::Set(None), created_at: ActiveValue::Set(now.into()), @@ -1037,7 +1028,7 @@ impl Conductor { let org_id = project.organization_id; let project = project.to_owned(); tokio::spawn(async move { - let mut update_prebuild = entities::prebuild::ActiveModel { + let mut update_prebuild = lapdev_db_entities::prebuild::ActiveModel { id: ActiveValue::Set(prebuild.id), ..Default::default() }; @@ -1128,13 +1119,13 @@ impl Conductor { #[allow(clippy::too_many_arguments)] async fn create_workspace_model( &self, - org: &entities::organization::Model, - user: &entities::user::Model, + org: 
&lapdev_db_entities::organization::Model, + user: &lapdev_db_entities::user::Model, repo: &RepoDetails, - machine_type: &entities::machine_type::Model, + machine_type: &lapdev_db_entities::machine_type::Model, ip: Option, user_agent: Option, - ) -> Result { + ) -> Result { let name = format!("{}-{}", repo.name, rand_string(12)); let (id_rsa, public_key) = self.generate_key_pair()?; let osuser = self.get_osuser(org.id); @@ -1182,9 +1173,9 @@ impl Conductor { }; let replica_hosts = if let Some(prebuild) = prebuild.as_ref() { - entities::prebuild_replica::Entity::find() - .filter(entities::prebuild_replica::Column::DeletedAt.is_null()) - .filter(entities::prebuild_replica::Column::PrebuildId.eq(prebuild.id)) + lapdev_db_entities::prebuild_replica::Entity::find() + .filter(lapdev_db_entities::prebuild_replica::Column::DeletedAt.is_null()) + .filter(lapdev_db_entities::prebuild_replica::Column::PrebuildId.eq(prebuild.id)) .all(&txn) .await? .iter() @@ -1212,7 +1203,7 @@ impl Conductor { })?; let workspace_id = Uuid::new_v4(); let now = Utc::now(); - let ws = entities::workspace::ActiveModel { + let ws = lapdev_db_entities::workspace::ActiveModel { id: ActiveValue::Set(workspace_id), deleted_at: ActiveValue::Set(None), name: ActiveValue::Set(name.clone()), @@ -1271,7 +1262,7 @@ impl Conductor { // copy the prebuild repo content from the prebuild folder to workspace folder async fn copy_prebuild_content( &self, - prebuild: &entities::prebuild::Model, + prebuild: &lapdev_db_entities::prebuild::Model, info: RepoBuildInfo, ws_client: &WorkspaceServiceClient, ) -> Result<(), ApiError> { @@ -1290,9 +1281,9 @@ impl Conductor { async fn copy_prebuild_image( &self, - prebuild: &entities::prebuild::Model, + prebuild: &lapdev_db_entities::prebuild::Model, output: &RepoBuildOutput, - ws: &entities::workspace::Model, + ws: &lapdev_db_entities::workspace::Model, ws_client: &WorkspaceServiceClient, ) -> Result<(), ApiError> { if ws.host_id != prebuild.host_id { @@ -1303,7 +1294,7 @@ 
impl Conductor { let replica = if let Some(replica) = replica { if replica.status == PrebuildReplicaStatus::Failed.to_string() { // if the replica transfer was failed, we just try it again - let replica = entities::prebuild_replica::ActiveModel { + let replica = lapdev_db_entities::prebuild_replica::ActiveModel { id: ActiveValue::Set(replica.id), status: ActiveValue::Set(PrebuildReplicaStatus::Transferring.to_string()), ..Default::default() @@ -1368,12 +1359,12 @@ impl Conductor { async fn create_prebuild_replica( &self, user_id: Uuid, - prebuild: &entities::prebuild::Model, + prebuild: &lapdev_db_entities::prebuild::Model, host_id: Uuid, repo_name: &str, ) -> Result<(), ApiError> { let id = Uuid::new_v4(); - let result = entities::prebuild_replica::ActiveModel { + let result = lapdev_db_entities::prebuild_replica::ActiveModel { id: ActiveValue::Set(id), deleted_at: ActiveValue::Set(None), created_at: ActiveValue::Set(Utc::now().into()), @@ -1405,7 +1396,7 @@ impl Conductor { async fn transfer_prebuild_replica( &self, replica_id: Uuid, - prebuild: &entities::prebuild::Model, + prebuild: &lapdev_db_entities::prebuild::Model, host_id: Uuid, repo_name: &str, ) { @@ -1427,7 +1418,7 @@ impl Conductor { } else { PrebuildReplicaStatus::Ready }; - let _ = entities::prebuild_replica::ActiveModel { + let _ = lapdev_db_entities::prebuild_replica::ActiveModel { id: ActiveValue::Set(replica_id), status: ActiveValue::Set(status.to_string()), ..Default::default() @@ -1439,7 +1430,7 @@ impl Conductor { async fn do_transfer_prebuild_replica( &self, - prebuild: &entities::prebuild::Model, + prebuild: &lapdev_db_entities::prebuild::Model, host_id: Uuid, repo_name: &str, ) -> Result<(), ApiError> { @@ -1485,8 +1476,8 @@ impl Conductor { pub async fn get_project_repo_details( &self, - user: &entities::user::Model, - project: &entities::project::Model, + user: &lapdev_db_entities::user::Model, + project: &lapdev_db_entities::project::Model, branch: Option<&str>, ) -> Result { let auth = 
if let Ok(Some(oauth)) = self.db.get_oauth(project.oauth_id).await { @@ -1533,7 +1524,7 @@ impl Conductor { async fn get_repo_details( &self, org_id: Uuid, - user: &entities::user::Model, + user: &lapdev_db_entities::user::Model, source: &RepoSource, branch: Option<&str>, ) -> Result { @@ -1571,13 +1562,13 @@ impl Conductor { async fn prepare_prebuild( &self, - user: &entities::user::Model, - ws: &entities::workspace::Model, - project: &entities::project::Model, + user: &lapdev_db_entities::user::Model, + ws: &lapdev_db_entities::workspace::Model, + project: &lapdev_db_entities::project::Model, repo: &RepoDetails, ip: Option, user_agent: Option, - ) -> Result, ApiError> { + ) -> Result, ApiError> { let prebuild = if let Some(prebuild_id) = ws.prebuild_id { self.db.get_prebuild(prebuild_id).await? } else { @@ -1605,7 +1596,7 @@ impl Conductor { ) .await? .ok_or_else(|| anyhow!("prebuild should exist"))?; - entities::workspace::ActiveModel { + lapdev_db_entities::workspace::ActiveModel { id: ActiveValue::Set(ws.id), prebuild_id: ActiveValue::Set(Some(prebuild.id)), ..Default::default() @@ -1672,12 +1663,12 @@ impl Conductor { #[allow(clippy::too_many_arguments)] async fn prepare_workspace_image( &self, - user: &entities::user::Model, + user: &lapdev_db_entities::user::Model, repo: &RepoDetails, env: Vec<(String, String)>, - ws: &entities::workspace::Model, + ws: &lapdev_db_entities::workspace::Model, ws_client: &WorkspaceServiceClient, - machine_type: &entities::machine_type::Model, + machine_type: &lapdev_db_entities::machine_type::Model, ip: Option, user_agent: Option, ) -> Result<(Option, RepoBuildResult), ApiError> { @@ -1737,7 +1728,7 @@ impl Conductor { machine_type.cost_per_second, ) .await?; - entities::workspace::ActiveModel { + lapdev_db_entities::workspace::ActiveModel { id: ActiveValue::Set(ws.id), status: ActiveValue::Set(WorkspaceStatus::Building.to_string()), usage_id: ActiveValue::Set(Some(usage.id)), @@ -1758,12 +1749,12 @@ impl Conductor { async fn 
create_workspace_from_output( &self, - ws: &entities::workspace::Model, + ws: &lapdev_db_entities::workspace::Model, prebuild_id: Option, output: RepoBuildResult, env: Vec<(String, String)>, ws_client: &WorkspaceServiceClient, - machine_type: &entities::machine_type::Model, + machine_type: &lapdev_db_entities::machine_type::Model, ) -> Result<(), ApiError> { let flatten_env = env .iter() @@ -1914,7 +1905,7 @@ impl Conductor { usage.id }; - entities::workspace::ActiveModel { + lapdev_db_entities::workspace::ActiveModel { id: ActiveValue::Set(ws.id), ssh_port: ActiveValue::Set(ssh_port.map(|port| port as i32)), ide_port: ActiveValue::Set(ide_port.map(|port| port as i32)), @@ -1933,7 +1924,7 @@ impl Conductor { ws.id } else { let service_ws_id = Uuid::new_v4(); - entities::workspace::ActiveModel { + lapdev_db_entities::workspace::ActiveModel { id: ActiveValue::Set(service_ws_id), name: ActiveValue::Set(workspace_name), created_at: ActiveValue::Set(Utc::now().into()), @@ -1974,7 +1965,7 @@ impl Conductor { if !exposed_ports.is_empty() { for (port, host_port) in exposed_ports { - entities::workspace_port::ActiveModel { + lapdev_db_entities::workspace_port::ActiveModel { id: ActiveValue::Set(Uuid::new_v4()), deleted_at: ActiveValue::Set(None), workspace_id: ActiveValue::Set(actual_ws_id), @@ -2000,10 +1991,10 @@ impl Conductor { #[allow(clippy::too_many_arguments)] async fn do_create_workspace( &self, - user: &entities::user::Model, - ws: &entities::workspace::Model, + user: &lapdev_db_entities::user::Model, + ws: &lapdev_db_entities::workspace::Model, repo: RepoDetails, - machine_type: &entities::machine_type::Model, + machine_type: &lapdev_db_entities::machine_type::Model, ip: Option, user_agent: Option, ) -> Result<(), ApiError> { @@ -2040,7 +2031,7 @@ impl Conductor { ) .await; - entities::organization::ActiveModel { + lapdev_db_entities::organization::ActiveModel { id: ActiveValue::Set(ws.organization_id), has_running_workspace: ActiveValue::Set(true), 
..Default::default() @@ -2053,8 +2044,8 @@ impl Conductor { pub async fn create_workspace( &self, - user: entities::user::Model, - org: entities::organization::Model, + user: lapdev_db_entities::user::Model, + org: lapdev_db_entities::organization::Model, workspace: NewWorkspace, ip: Option, user_agent: Option, @@ -2126,7 +2117,7 @@ impl Conductor { } } } - let _ = entities::workspace::ActiveModel { + let _ = lapdev_db_entities::workspace::ActiveModel { id: ActiveValue::Set(ws.id), status: ActiveValue::Set(WorkspaceStatus::Failed.to_string()), ..Default::default() @@ -2181,10 +2172,10 @@ impl Conductor { pub async fn update_workspace_status( &self, - ws: &entities::workspace::Model, + ws: &lapdev_db_entities::workspace::Model, status: WorkspaceStatus, - ) -> Result { - let update_ws = entities::workspace::ActiveModel { + ) -> Result { + let update_ws = lapdev_db_entities::workspace::ActiveModel { id: ActiveValue::Set(ws.id), status: ActiveValue::Set(status.to_string()), ..Default::default() @@ -2254,7 +2245,7 @@ impl Conductor { pub async fn delete_workspace( &self, - workspace: &entities::workspace::Model, + workspace: &lapdev_db_entities::workspace::Model, ip: Option, user_agent: Option, ) -> Result<(), ApiError> { @@ -2297,7 +2288,7 @@ impl Conductor { .end_usage(&txn, usage_id, now.into()) .await?; } - let update_ws = entities::workspace::ActiveModel { + let update_ws = lapdev_db_entities::workspace::ActiveModel { id: ActiveValue::Set(workspace.id), status: ActiveValue::Set(WorkspaceStatus::Deleting.to_string()), updated_at: ActiveValue::Set(Some(now.into())), @@ -2309,9 +2300,9 @@ impl Conductor { txn.commit().await?; let compose_services = if ws.is_compose { - entities::workspace::Entity::find() - .filter(entities::workspace::Column::DeletedAt.is_null()) - .filter(entities::workspace::Column::ComposeParent.eq(ws.id)) + lapdev_db_entities::workspace::Entity::find() + .filter(lapdev_db_entities::workspace::Column::DeletedAt.is_null()) + 
.filter(lapdev_db_entities::workspace::Column::ComposeParent.eq(ws.id)) .all(&self.db.conn) .await? } else { @@ -2343,7 +2334,7 @@ impl Conductor { async fn do_delete_workspace( &self, - ws: &entities::workspace::Model, + ws: &lapdev_db_entities::workspace::Model, network: Option, ip: Option, user_agent: Option, @@ -2394,7 +2385,7 @@ impl Conductor { .db .get_workspace_host_with_lock(&txn, ws.host_id) .await?; - entities::workspace::ActiveModel { + lapdev_db_entities::workspace::ActiveModel { id: ActiveValue::Set(ws.id), status: ActiveValue::Set(status.to_string()), deleted_at: ActiveValue::Set(Some(now.into())), @@ -2439,7 +2430,7 @@ impl Conductor { }; tracing::error!("delete workspace {} failed: {e}", ws.id); let status = WorkspaceStatus::DeleteFailed; - entities::workspace::ActiveModel { + lapdev_db_entities::workspace::ActiveModel { id: ActiveValue::Set(ws.id), status: ActiveValue::Set(status.to_string()), updated_at: ActiveValue::Set(Some(now.into())), @@ -2459,7 +2450,7 @@ impl Conductor { async fn update_org_has_running_workspace(&self, org_id: Uuid) -> Result<()> { let ws = self.db.get_org_running_workspace(org_id).await?; if ws.is_none() { - entities::organization::ActiveModel { + lapdev_db_entities::organization::ActiveModel { id: ActiveValue::Set(org_id), has_running_workspace: ActiveValue::Set(false), ..Default::default() @@ -2472,7 +2463,7 @@ impl Conductor { pub async fn start_workspace( &self, - workspace: entities::workspace::Model, + workspace: lapdev_db_entities::workspace::Model, // waiting for the actual start on workspace host waiting: bool, ip: Option, @@ -2513,7 +2504,7 @@ impl Conductor { user_agent, ) .await?; - let update_ws = entities::workspace::ActiveModel { + let update_ws = lapdev_db_entities::workspace::ActiveModel { id: ActiveValue::Set(workspace.id), updated_at: ActiveValue::Set(Some(now.into())), status: ActiveValue::Set(WorkspaceStatus::Starting.to_string()), @@ -2527,9 +2518,9 @@ impl Conductor { ws_client.ok_or_else(|| 
anyhow!("can't connect to the workspace servic client"))?; let compose_services = if ws.is_compose { - entities::workspace::Entity::find() - .filter(entities::workspace::Column::DeletedAt.is_null()) - .filter(entities::workspace::Column::ComposeParent.eq(ws.id)) + lapdev_db_entities::workspace::Entity::find() + .filter(lapdev_db_entities::workspace::Column::DeletedAt.is_null()) + .filter(lapdev_db_entities::workspace::Column::ComposeParent.eq(ws.id)) .all(&self.db.conn) .await? } else { @@ -2560,7 +2551,7 @@ impl Conductor { pub async fn stop_workspace( &self, - workspace: entities::workspace::Model, + workspace: lapdev_db_entities::workspace::Model, ip: Option, user_agent: Option, ) -> Result<(), ApiError> { @@ -2594,7 +2585,7 @@ impl Conductor { .end_usage(&txn, usage_id, now.into()) .await?; } - let update_ws = entities::workspace::ActiveModel { + let update_ws = lapdev_db_entities::workspace::ActiveModel { id: ActiveValue::Set(workspace.id), status: ActiveValue::Set(WorkspaceStatus::Stopping.to_string()), updated_at: ActiveValue::Set(Some(now.into())), @@ -2609,9 +2600,9 @@ impl Conductor { ws_client.ok_or_else(|| anyhow!("can't connect to the workspace servic client"))?; let compose_services = if ws.is_compose { - entities::workspace::Entity::find() - .filter(entities::workspace::Column::DeletedAt.is_null()) - .filter(entities::workspace::Column::ComposeParent.eq(ws.id)) + lapdev_db_entities::workspace::Entity::find() + .filter(lapdev_db_entities::workspace::Column::DeletedAt.is_null()) + .filter(lapdev_db_entities::workspace::Column::ComposeParent.eq(ws.id)) .all(&self.db.conn) .await? 
} else { @@ -2637,7 +2628,7 @@ impl Conductor { pub async fn rebuild_workspace( &self, - workspace: entities::workspace::Model, + workspace: lapdev_db_entities::workspace::Model, ip: Option, user_agent: Option, ) -> Result<(), ApiError> { @@ -2715,7 +2706,7 @@ impl Conductor { .await?; usage.id }; - let ws = entities::workspace::ActiveModel { + let ws = lapdev_db_entities::workspace::ActiveModel { id: ActiveValue::Set(workspace.id), status: ActiveValue::Set(WorkspaceStatus::Building.to_string()), usage_id: ActiveValue::Set(Some(usage_id)), @@ -2742,7 +2733,7 @@ impl Conductor { .end_usage(&txn, usage_id, now.into()) .await?; - entities::workspace::ActiveModel { + lapdev_db_entities::workspace::ActiveModel { id: ActiveValue::Set(workspace.id), status: ActiveValue::Set(WorkspaceStatus::Failed.to_string()), updated_at: ActiveValue::Set(Some(now.into())), @@ -2763,16 +2754,16 @@ impl Conductor { async fn do_rebuild_workspace( &self, - ws: &entities::workspace::Model, - machine_type: &entities::machine_type::Model, + ws: &lapdev_db_entities::workspace::Model, + machine_type: &lapdev_db_entities::machine_type::Model, ) -> Result<(), ApiError> { let ws_client = { self.rpcs.read().await.get(&ws.host_id).cloned() } .ok_or_else(|| anyhow!("can't find the workspace host rpc client"))?; let compose_services = if ws.is_compose { - entities::workspace::Entity::find() - .filter(entities::workspace::Column::DeletedAt.is_null()) - .filter(entities::workspace::Column::ComposeParent.eq(ws.id)) + lapdev_db_entities::workspace::Entity::find() + .filter(lapdev_db_entities::workspace::Column::DeletedAt.is_null()) + .filter(lapdev_db_entities::workspace::Column::ComposeParent.eq(ws.id)) .all(&self.db.conn) .await? 
} else { @@ -2792,7 +2783,7 @@ impl Conductor { ) .await??; let now = Utc::now(); - entities::workspace::ActiveModel { + lapdev_db_entities::workspace::ActiveModel { id: ActiveValue::Set(service_ws.id), status: ActiveValue::Set(WorkspaceStatus::Deleted.to_string()), deleted_at: ActiveValue::Set(Some(now.into())), @@ -2803,13 +2794,13 @@ impl Conductor { } let now = Utc::now(); - entities::workspace_port::Entity::update_many() - .set(entities::workspace_port::ActiveModel { + lapdev_db_entities::workspace_port::Entity::update_many() + .set(lapdev_db_entities::workspace_port::ActiveModel { deleted_at: ActiveValue::Set(Some(now.into())), ..Default::default() }) - .filter(entities::workspace_port::Column::WorkspaceId.eq(ws.id)) - .filter(entities::workspace_port::Column::DeletedAt.is_null()) + .filter(lapdev_db_entities::workspace_port::Column::WorkspaceId.eq(ws.id)) + .filter(lapdev_db_entities::workspace_port::Column::DeletedAt.is_null()) .exec(&self.db.conn) .await?; @@ -2898,7 +2889,7 @@ impl Conductor { ) .await; - entities::organization::ActiveModel { + lapdev_db_entities::organization::ActiveModel { id: ActiveValue::Set(ws.organization_id), has_running_workspace: ActiveValue::Set(true), ..Default::default() @@ -2912,7 +2903,7 @@ impl Conductor { async fn do_start_workspace( &self, ws_client: &WorkspaceServiceClient, - ws: &entities::workspace::Model, + ws: &lapdev_db_entities::workspace::Model, ) -> Result<()> { let result = ws_client .start_workspace( @@ -2954,7 +2945,7 @@ impl Conductor { } else { None }; - entities::workspace::ActiveModel { + lapdev_db_entities::workspace::ActiveModel { id: ActiveValue::Set(ws.id), status: ActiveValue::Set(status.to_string()), usage_id: ActiveValue::Set(usage.map(|u| u.id)), @@ -2966,7 +2957,7 @@ impl Conductor { .await?; txn.commit().await?; - entities::organization::ActiveModel { + lapdev_db_entities::organization::ActiveModel { id: ActiveValue::Set(ws.organization_id), has_running_workspace: ActiveValue::Set(true), 
..Default::default() @@ -2984,7 +2975,7 @@ impl Conductor { }; tracing::error!("start workspace {} failed: {e}", ws.id); let status = WorkspaceStatus::Failed; - entities::workspace::ActiveModel { + lapdev_db_entities::workspace::ActiveModel { id: ActiveValue::Set(ws.id), updated_at: ActiveValue::Set(Some(now.into())), status: ActiveValue::Set(WorkspaceStatus::Failed.to_string()), @@ -3002,7 +2993,7 @@ impl Conductor { async fn do_stop_workspace( &self, ws_client: &WorkspaceServiceClient, - ws: &entities::workspace::Model, + ws: &lapdev_db_entities::workspace::Model, ) -> Result<()> { let result = ws_client .stop_workspace( @@ -3017,7 +3008,7 @@ impl Conductor { match result { Ok(_) => { let status = WorkspaceStatus::Stopped; - entities::workspace::ActiveModel { + lapdev_db_entities::workspace::ActiveModel { id: ActiveValue::Set(ws.id), status: ActiveValue::Set(status.to_string()), updated_at: ActiveValue::Set(Some(now.into())), @@ -3034,7 +3025,7 @@ impl Conductor { }; tracing::error!("stop workspace {} failed: {e}", ws.id); let status = WorkspaceStatus::StopFailed; - entities::workspace::ActiveModel { + lapdev_db_entities::workspace::ActiveModel { id: ActiveValue::Set(ws.id), status: ActiveValue::Set(status.to_string()), updated_at: ActiveValue::Set(Some(now.into())), @@ -3133,13 +3124,13 @@ impl Conductor { &self, org_id: Uuid, user_id: Uuid, - project: &entities::project::Model, + project: &lapdev_db_entities::project::Model, ip: Option, user_agent: Option, ) -> Result<(), ApiError> { let now = Utc::now(); let txn = self.db.conn.begin().await?; - let project = entities::project::ActiveModel { + let project = lapdev_db_entities::project::ActiveModel { id: ActiveValue::Set(project.id), deleted_at: ActiveValue::Set(Some(now.into())), ..Default::default() @@ -3179,10 +3170,13 @@ impl Conductor { Ok(()) } - async fn do_delete_project(&self, project: &entities::project::Model) -> Result<(), ApiError> { - let prebuilds = entities::prebuild::Entity::find() - 
.filter(entities::prebuild::Column::ProjectId.eq(project.id)) - .filter(entities::prebuild::Column::DeletedAt.is_null()) + async fn do_delete_project( + &self, + project: &lapdev_db_entities::project::Model, + ) -> Result<(), ApiError> { + let prebuilds = lapdev_db_entities::prebuild::Entity::find() + .filter(lapdev_db_entities::prebuild::Column::ProjectId.eq(project.id)) + .filter(lapdev_db_entities::prebuild::Column::DeletedAt.is_null()) .all(&self.db.conn) .await?; @@ -3207,8 +3201,8 @@ impl Conductor { &self, org_id: Uuid, user_id: Uuid, - project: &entities::project::Model, - prebuild: &entities::prebuild::Model, + project: &lapdev_db_entities::project::Model, + prebuild: &lapdev_db_entities::prebuild::Model, ip: Option, user_agent: Option, ) -> Result<(), ApiError> { @@ -3219,9 +3213,9 @@ impl Conductor { } let txn = self.db.conn.begin().await?; - if entities::workspace::Entity::find() - .filter(entities::workspace::Column::DeletedAt.is_null()) - .filter(entities::workspace::Column::PrebuildId.eq(prebuild.id)) + if lapdev_db_entities::workspace::Entity::find() + .filter(lapdev_db_entities::workspace::Column::DeletedAt.is_null()) + .filter(lapdev_db_entities::workspace::Column::PrebuildId.eq(prebuild.id)) .lock_exclusive() .one(&txn) .await? 
@@ -3232,7 +3226,7 @@ impl Conductor { )); } - entities::prebuild::ActiveModel { + lapdev_db_entities::prebuild::ActiveModel { id: ActiveValue::Set(prebuild.id), deleted_at: ActiveValue::Set(Some(Utc::now().into())), ..Default::default() @@ -3280,7 +3274,7 @@ impl Conductor { async fn do_delete_prebuild( &self, - prebuild: &entities::prebuild::Model, + prebuild: &lapdev_db_entities::prebuild::Model, ) -> Result<(), ApiError> { let ws_client = { self.rpcs.read().await.get(&prebuild.host_id).cloned() } .ok_or_else(|| anyhow!("can't find the workspace host rpc client"))?; @@ -3299,9 +3293,9 @@ impl Conductor { ) .await??; - let replicas = entities::prebuild_replica::Entity::find() - .filter(entities::prebuild_replica::Column::DeletedAt.is_null()) - .filter(entities::prebuild_replica::Column::PrebuildId.eq(prebuild.id)) + let replicas = lapdev_db_entities::prebuild_replica::Entity::find() + .filter(lapdev_db_entities::prebuild_replica::Column::DeletedAt.is_null()) + .filter(lapdev_db_entities::prebuild_replica::Column::PrebuildId.eq(prebuild.id)) .all(&self.db.conn) .await?; for replica in replicas { @@ -3317,7 +3311,7 @@ impl Conductor { output.clone(), ) .await??; - entities::prebuild_replica::ActiveModel { + lapdev_db_entities::prebuild_replica::ActiveModel { id: ActiveValue::Set(replica.id), deleted_at: ActiveValue::Set(Some(Utc::now().into())), ..Default::default() @@ -3340,7 +3334,7 @@ impl Conductor { ip: Option, user_agent: Option, ) -> Result<(), ApiError> { - let prebuild = entities::prebuild::Entity::find_by_id(prebuild_id) + let prebuild = lapdev_db_entities::prebuild::Entity::find_by_id(prebuild_id) .one(&self.db.conn) .await? 
.ok_or_else(|| anyhow!("no prebuild"))?; @@ -3349,10 +3343,10 @@ impl Conductor { } let project = self.db.get_project(prebuild.project_id).await?; - let latest = entities::prebuild::Entity::find() - .filter(entities::prebuild::Column::ProjectId.eq(prebuild.project_id)) - .filter(entities::prebuild::Column::DeletedAt.is_null()) - .order_by_desc(entities::prebuild::Column::CreatedAt) + let latest = lapdev_db_entities::prebuild::Entity::find() + .filter(lapdev_db_entities::prebuild::Column::ProjectId.eq(prebuild.project_id)) + .filter(lapdev_db_entities::prebuild::Column::DeletedAt.is_null()) + .order_by_desc(lapdev_db_entities::prebuild::Column::CreatedAt) .one(&self.db.conn) .await? .ok_or_else(|| anyhow!("project doesn't have prebuilds"))?; @@ -3380,14 +3374,14 @@ impl Conductor { &self, org_id: Uuid, user_id: Uuid, - project: &entities::project::Model, - prebuild: &entities::prebuild::Model, + project: &lapdev_db_entities::project::Model, + prebuild: &lapdev_db_entities::prebuild::Model, ip: Option, user_agent: Option, ) -> Result<(), ApiError> { - if entities::workspace::Entity::find() - .filter(entities::workspace::Column::DeletedAt.is_null()) - .filter(entities::workspace::Column::PrebuildId.eq(prebuild.id)) + if lapdev_db_entities::workspace::Entity::find() + .filter(lapdev_db_entities::workspace::Column::DeletedAt.is_null()) + .filter(lapdev_db_entities::workspace::Column::PrebuildId.eq(prebuild.id)) .one(&self.db.conn) .await? 
.is_some() diff --git a/lapdev-dashboard/Cargo.lock b/crates/dashboard/Cargo.lock similarity index 100% rename from lapdev-dashboard/Cargo.lock rename to crates/dashboard/Cargo.lock diff --git a/crates/dashboard/Cargo.toml b/crates/dashboard/Cargo.toml new file mode 100644 index 0000000..83a5e13 --- /dev/null +++ b/crates/dashboard/Cargo.toml @@ -0,0 +1,26 @@ +[package] +name = "lapdev-dashboard" +version.workspace = true +authors.workspace = true +edition.workspace = true + +[dependencies] +gloo-net = "0.6.0" +gloo-timers = { version = "0.2", features = ["futures"] } +wasm-bindgen = "0.2.100" +web-sys = { version = "0.3.77", features = ["Navigator", "Clipboard", "DomRect", "Element"] } +urlencoding = "2.1.3" +lucide-leptos = "1.2.0" +uuid = { version = "1.16.0", features = ["v4", "serde", "js"] } +getrandom = { version = "0.3", features = ["wasm_js"] } +tailwind_fuse = { version = "0.3.2", features = ["variant"] } +leptos = { version = "0.8.2", features = ["csr"] } +leptos_router = { version = "0.8.2" } +rust_decimal.workspace = true +chrono.workspace = true +serde.workspace = true +serde_json.workspace = true +anyhow.workspace = true +futures.workspace = true +lapdev-common.workspace = true +lapdev-api-hrpc.workspace = true diff --git a/lapdev-dashboard/Trunk.toml b/crates/dashboard/Trunk.toml similarity index 100% rename from lapdev-dashboard/Trunk.toml rename to crates/dashboard/Trunk.toml diff --git a/lapdev-dashboard/main.css b/crates/dashboard/dist/.stage/main-10252f88a73a5884.css similarity index 100% rename from lapdev-dashboard/main.css rename to crates/dashboard/dist/.stage/main-10252f88a73a5884.css diff --git a/lapdev-dashboard/index.html b/crates/dashboard/index.html similarity index 100% rename from lapdev-dashboard/index.html rename to crates/dashboard/index.html diff --git a/crates/dashboard/main.css b/crates/dashboard/main.css new file mode 100644 index 0000000..76b9cd8 --- /dev/null +++ b/crates/dashboard/main.css @@ -0,0 +1,2727 @@ +/* +! 
tailwindcss v3.4.1 | MIT License | https://tailwindcss.com +*/ + +/* +1. Prevent padding and border from affecting element width. (https://github.com/mozdevs/cssremedy/issues/4) +2. Allow adding a border to an element by just adding a border-width. (https://github.com/tailwindcss/tailwindcss/pull/116) +*/ + +*, +::before, +::after { + box-sizing: border-box; + /* 1 */ + border-width: 0; + /* 2 */ + border-style: solid; + /* 2 */ + border-color: #E5E7EB; + /* 2 */ +} + +::before, +::after { + --tw-content: ''; +} + +/* +1. Use a consistent sensible line-height in all browsers. +2. Prevent adjustments of font size after orientation changes in iOS. +3. Use a more readable tab size. +4. Use the user's configured `sans` font-family by default. +5. Use the user's configured `sans` font-feature-settings by default. +6. Use the user's configured `sans` font-variation-settings by default. +7. Disable tap highlights on iOS +*/ + +html, +:host { + line-height: 1.5; + /* 1 */ + -webkit-text-size-adjust: 100%; + /* 2 */ + -moz-tab-size: 4; + /* 3 */ + -o-tab-size: 4; + tab-size: 4; + /* 3 */ + font-family: ui-sans-serif, system-ui, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol", "Noto Color Emoji"; + /* 4 */ + font-feature-settings: normal; + /* 5 */ + font-variation-settings: normal; + /* 6 */ + -webkit-tap-highlight-color: transparent; + /* 7 */ +} + +/* +1. Remove the margin in all browsers. +2. Inherit line-height from `html` so users can set them as a class directly on the `html` element. +*/ + +body { + margin: 0; + /* 1 */ + line-height: inherit; + /* 2 */ +} + +/* +1. Add the correct height in Firefox. +2. Correct the inheritance of border color in Firefox. (https://bugzilla.mozilla.org/show_bug.cgi?id=190655) +3. Ensure horizontal rules are visible by default. +*/ + +hr { + height: 0; + /* 1 */ + color: inherit; + /* 2 */ + border-top-width: 1px; + /* 3 */ +} + +/* +Add the correct text decoration in Chrome, Edge, and Safari. 
+*/ + +abbr:where([title]) { + -webkit-text-decoration: underline dotted; + text-decoration: underline dotted; +} + +/* +Remove the default font size and weight for headings. +*/ + +h1, +h2, +h3, +h4, +h5, +h6 { + font-size: inherit; + font-weight: inherit; +} + +/* +Reset links to optimize for opt-in styling instead of opt-out. +*/ + +a { + color: inherit; + text-decoration: inherit; +} + +/* +Add the correct font weight in Edge and Safari. +*/ + +b, +strong { + font-weight: bolder; +} + +/* +1. Use the user's configured `mono` font-family by default. +2. Use the user's configured `mono` font-feature-settings by default. +3. Use the user's configured `mono` font-variation-settings by default. +4. Correct the odd `em` font sizing in all browsers. +*/ + +code, +kbd, +samp, +pre { + font-family: ui-monospace, SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace; + /* 1 */ + font-feature-settings: normal; + /* 2 */ + font-variation-settings: normal; + /* 3 */ + font-size: 1em; + /* 4 */ +} + +/* +Add the correct font size in all browsers. +*/ + +small { + font-size: 80%; +} + +/* +Prevent `sub` and `sup` elements from affecting the line height in all browsers. +*/ + +sub, +sup { + font-size: 75%; + line-height: 0; + position: relative; + vertical-align: baseline; +} + +sub { + bottom: -0.25em; +} + +sup { + top: -0.5em; +} + +/* +1. Remove text indentation from table contents in Chrome and Safari. (https://bugs.chromium.org/p/chromium/issues/detail?id=999088, https://bugs.webkit.org/show_bug.cgi?id=201297) +2. Correct table border color inheritance in all Chrome and Safari. (https://bugs.chromium.org/p/chromium/issues/detail?id=935729, https://bugs.webkit.org/show_bug.cgi?id=195016) +3. Remove gaps between table borders by default. +*/ + +table { + text-indent: 0; + /* 1 */ + border-color: inherit; + /* 2 */ + border-collapse: collapse; + /* 3 */ +} + +/* +1. Change the font styles in all browsers. +2. 
Remove the margin in Firefox and Safari. +3. Remove default padding in all browsers. +*/ + +button, +input, +optgroup, +select, +textarea { + font-family: inherit; + /* 1 */ + font-feature-settings: inherit; + /* 1 */ + font-variation-settings: inherit; + /* 1 */ + font-size: 100%; + /* 1 */ + font-weight: inherit; + /* 1 */ + line-height: inherit; + /* 1 */ + color: inherit; + /* 1 */ + margin: 0; + /* 2 */ + padding: 0; + /* 3 */ +} + +/* +Remove the inheritance of text transform in Edge and Firefox. +*/ + +button, +select { + text-transform: none; +} + +/* +1. Correct the inability to style clickable types in iOS and Safari. +2. Remove default button styles. +*/ + +button, +[type='button'], +[type='reset'], +[type='submit'] { + -webkit-appearance: button; + /* 1 */ + background-color: transparent; + /* 2 */ + background-image: none; + /* 2 */ +} + +/* +Use the modern Firefox focus style for all focusable elements. +*/ + +:-moz-focusring { + outline: auto; +} + +/* +Remove the additional `:invalid` styles in Firefox. (https://github.com/mozilla/gecko-dev/blob/2f9eacd9d3d995c937b4251a5557d95d494c9be1/layout/style/res/forms.css#L728-L737) +*/ + +:-moz-ui-invalid { + box-shadow: none; +} + +/* +Add the correct vertical alignment in Chrome and Firefox. +*/ + +progress { + vertical-align: baseline; +} + +/* +Correct the cursor style of increment and decrement buttons in Safari. +*/ + +::-webkit-inner-spin-button, +::-webkit-outer-spin-button { + height: auto; +} + +/* +1. Correct the odd appearance in Chrome and Safari. +2. Correct the outline style in Safari. +*/ + +[type='search'] { + -webkit-appearance: textfield; + /* 1 */ + outline-offset: -2px; + /* 2 */ +} + +/* +Remove the inner padding in Chrome and Safari on macOS. +*/ + +::-webkit-search-decoration { + -webkit-appearance: none; +} + +/* +1. Correct the inability to style clickable types in iOS and Safari. +2. Change font properties to `inherit` in Safari. 
+*/ + +::-webkit-file-upload-button { + -webkit-appearance: button; + /* 1 */ + font: inherit; + /* 2 */ +} + +/* +Add the correct display in Chrome and Safari. +*/ + +summary { + display: list-item; +} + +/* +Removes the default spacing and border for appropriate elements. +*/ + +blockquote, +dl, +dd, +h1, +h2, +h3, +h4, +h5, +h6, +hr, +figure, +p, +pre { + margin: 0; +} + +fieldset { + margin: 0; + padding: 0; +} + +legend { + padding: 0; +} + +ol, +ul, +menu { + list-style: none; + margin: 0; + padding: 0; +} + +/* +Reset default styling for dialogs. +*/ + +dialog { + padding: 0; +} + +/* +Prevent resizing textareas horizontally by default. +*/ + +textarea { + resize: vertical; +} + +/* +1. Reset the default placeholder opacity in Firefox. (https://github.com/tailwindlabs/tailwindcss/issues/3300) +2. Set the default placeholder color to the user's configured gray 400 color. +*/ + +input::-moz-placeholder, textarea::-moz-placeholder { + opacity: 1; + /* 1 */ + color: #9CA3AF; + /* 2 */ +} + +input::placeholder, +textarea::placeholder { + opacity: 1; + /* 1 */ + color: #9CA3AF; + /* 2 */ +} + +/* +Set the default cursor for buttons. +*/ + +button, +[role="button"] { + cursor: pointer; +} + +/* +Make sure disabled buttons don't get the pointer cursor. +*/ + +:disabled { + cursor: default; +} + +/* +1. Make replaced elements `display: block` by default. (https://github.com/mozdevs/cssremedy/issues/14) +2. Add `vertical-align: middle` to align replaced elements more sensibly by default. (https://github.com/jensimmons/cssremedy/issues/14#issuecomment-634934210) + This can trigger a poorly considered lint error in some tools but is included by design. +*/ + +img, +svg, +video, +canvas, +audio, +iframe, +embed, +object { + display: block; + /* 1 */ + vertical-align: middle; + /* 2 */ +} + +/* +Constrain images and videos to the parent width and preserve their intrinsic aspect ratio. 
(https://github.com/mozdevs/cssremedy/issues/14) +*/ + +img, +video { + max-width: 100%; + height: auto; +} + +/* Make elements with the HTML hidden attribute stay hidden by default */ + +[hidden] { + display: none; +} + +[data-popper-arrow],[data-popper-arrow]:before { + position: absolute; + width: 8px; + height: 8px; + background: inherit; +} + +[data-popper-arrow] { + visibility: hidden; +} + +[data-popper-arrow]:before { + content: ""; + visibility: visible; + transform: rotate(45deg); +} + +[data-popper-arrow]:after { + content: ""; + visibility: visible; + transform: rotate(45deg); + position: absolute; + width: 9px; + height: 9px; + background: inherit; +} + +[role="tooltip"] > [data-popper-arrow]:before { + border-style: solid; + border-color: #e5e7eb; +} + +.dark [role="tooltip"] > [data-popper-arrow]:before { + border-style: solid; + border-color: #4b5563; +} + +[role="tooltip"] > [data-popper-arrow]:after { + border-style: solid; + border-color: #e5e7eb; +} + +.dark [role="tooltip"] > [data-popper-arrow]:after { + border-style: solid; + border-color: #4b5563; +} + +[data-popover][role="tooltip"][data-popper-placement^='top'] > [data-popper-arrow]:before { + border-bottom-width: 1px; + border-right-width: 1px; +} + +[data-popover][role="tooltip"][data-popper-placement^='top'] > [data-popper-arrow]:after { + border-bottom-width: 1px; + border-right-width: 1px; +} + +[data-popover][role="tooltip"][data-popper-placement^='right'] > [data-popper-arrow]:before { + border-bottom-width: 1px; + border-left-width: 1px; +} + +[data-popover][role="tooltip"][data-popper-placement^='right'] > [data-popper-arrow]:after { + border-bottom-width: 1px; + border-left-width: 1px; +} + +[data-popover][role="tooltip"][data-popper-placement^='bottom'] > [data-popper-arrow]:before { + border-top-width: 1px; + border-left-width: 1px; +} + +[data-popover][role="tooltip"][data-popper-placement^='bottom'] > [data-popper-arrow]:after { + border-top-width: 1px; + border-left-width: 
1px; +} + +[data-popover][role="tooltip"][data-popper-placement^='left'] > [data-popper-arrow]:before { + border-top-width: 1px; + border-right-width: 1px; +} + +[data-popover][role="tooltip"][data-popper-placement^='left'] > [data-popper-arrow]:after { + border-top-width: 1px; + border-right-width: 1px; +} + +[data-popover][role="tooltip"][data-popper-placement^='top'] > [data-popper-arrow] { + bottom: -5px; +} + +[data-popover][role="tooltip"][data-popper-placement^='bottom'] > [data-popper-arrow] { + top: -5px; +} + +[data-popover][role="tooltip"][data-popper-placement^='left'] > [data-popper-arrow] { + right: -5px; +} + +[data-popover][role="tooltip"][data-popper-placement^='right'] > [data-popper-arrow] { + left: -5px; +} + +[type='text'],[type='email'],[type='url'],[type='password'],[type='number'],[type='date'],[type='datetime-local'],[type='month'],[type='search'],[type='tel'],[type='time'],[type='week'],[multiple],textarea,select { + -webkit-appearance: none; + -moz-appearance: none; + appearance: none; + background-color: #fff; + border-color: #6B7280; + border-width: 1px; + border-radius: 0px; + padding-top: 0.5rem; + padding-right: 0.75rem; + padding-bottom: 0.5rem; + padding-left: 0.75rem; + font-size: 1rem; + line-height: 1.5rem; + --tw-shadow: 0 0 #0000; +} + +[type='text']:focus, [type='email']:focus, [type='url']:focus, [type='password']:focus, [type='number']:focus, [type='date']:focus, [type='datetime-local']:focus, [type='month']:focus, [type='search']:focus, [type='tel']:focus, [type='time']:focus, [type='week']:focus, [multiple]:focus, textarea:focus, select:focus { + outline: 2px solid transparent; + outline-offset: 2px; + --tw-ring-inset: var(--tw-empty,/*!*/ /*!*/); + --tw-ring-offset-width: 0px; + --tw-ring-offset-color: #fff; + --tw-ring-color: #1C64F2; + --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color); + --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(1px + 
var(--tw-ring-offset-width)) var(--tw-ring-color); + box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow); + border-color: #1C64F2; +} + +input::-moz-placeholder, textarea::-moz-placeholder { + color: #6B7280; + opacity: 1; +} + +input::placeholder,textarea::placeholder { + color: #6B7280; + opacity: 1; +} + +::-webkit-datetime-edit-fields-wrapper { + padding: 0; +} + +::-webkit-date-and-time-value { + min-height: 1.5em; +} + +select:not([size]) { + background-image: url("data:image/svg+xml,%3csvg aria-hidden='true' xmlns='http://www.w3.org/2000/svg' fill='none' viewBox='0 0 10 6'%3e %3cpath stroke='%236B7280' stroke-linecap='round' stroke-linejoin='round' stroke-width='2' d='m1 1 4 4 4-4'/%3e %3c/svg%3e"); + background-position: right 0.75rem center; + background-repeat: no-repeat; + background-size: 0.75em 0.75em; + padding-right: 2.5rem; + -webkit-print-color-adjust: exact; + print-color-adjust: exact; +} + +:is([dir=rtl]) select:not([size]) { + background-position: left 0.75rem center; + padding-right: 0.75rem; + padding-left: 0; +} + +[multiple] { + background-image: initial; + background-position: initial; + background-repeat: unset; + background-size: initial; + padding-right: 0.75rem; + -webkit-print-color-adjust: unset; + print-color-adjust: unset; +} + +[type='checkbox'],[type='radio'] { + -webkit-appearance: none; + -moz-appearance: none; + appearance: none; + padding: 0; + -webkit-print-color-adjust: exact; + print-color-adjust: exact; + display: inline-block; + vertical-align: middle; + background-origin: border-box; + -webkit-user-select: none; + -moz-user-select: none; + user-select: none; + flex-shrink: 0; + height: 1rem; + width: 1rem; + color: #1C64F2; + background-color: #fff; + border-color: #6B7280; + border-width: 1px; + --tw-shadow: 0 0 #0000; +} + +[type='checkbox'] { + border-radius: 0px; +} + +[type='radio'] { + border-radius: 100%; +} + +[type='checkbox']:focus,[type='radio']:focus { + outline: 2px solid 
transparent; + outline-offset: 2px; + --tw-ring-inset: var(--tw-empty,/*!*/ /*!*/); + --tw-ring-offset-width: 2px; + --tw-ring-offset-color: #fff; + --tw-ring-color: #1C64F2; + --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color); + --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(2px + var(--tw-ring-offset-width)) var(--tw-ring-color); + box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow); +} + +[type='checkbox']:checked,[type='radio']:checked,.dark [type='checkbox']:checked,.dark [type='radio']:checked { + border-color: transparent; + background-color: currentColor; + background-size: 0.55em 0.55em; + background-position: center; + background-repeat: no-repeat; +} + +[type='checkbox']:checked { + background-image: url("data:image/svg+xml,%3csvg aria-hidden='true' xmlns='http://www.w3.org/2000/svg' fill='none' viewBox='0 0 16 12'%3e %3cpath stroke='white' stroke-linecap='round' stroke-linejoin='round' stroke-width='3' d='M1 5.917 5.724 10.5 15 1.5'/%3e %3c/svg%3e"); + background-repeat: no-repeat; + background-size: 0.55em 0.55em; + -webkit-print-color-adjust: exact; + print-color-adjust: exact; +} + +[type='radio']:checked { + background-image: url("data:image/svg+xml,%3csvg viewBox='0 0 16 16' fill='white' xmlns='http://www.w3.org/2000/svg'%3e%3ccircle cx='8' cy='8' r='3'/%3e%3c/svg%3e"); + background-size: 1em 1em; +} + +.dark [type='radio']:checked { + background-image: url("data:image/svg+xml,%3csvg viewBox='0 0 16 16' fill='white' xmlns='http://www.w3.org/2000/svg'%3e%3ccircle cx='8' cy='8' r='3'/%3e%3c/svg%3e"); + background-size: 1em 1em; +} + +[type='checkbox']:indeterminate { + background-image: url("data:image/svg+xml,%3csvg aria-hidden='true' xmlns='http://www.w3.org/2000/svg' fill='none' viewBox='0 0 16 12'%3e %3cpath stroke='white' stroke-linecap='round' stroke-linejoin='round' stroke-width='3' d='M1 5.917 5.724 10.5 15 1.5'/%3e %3c/svg%3e"); + background-color: 
currentColor; + border-color: transparent; + background-position: center; + background-repeat: no-repeat; + background-size: 0.55em 0.55em; + -webkit-print-color-adjust: exact; + print-color-adjust: exact; +} + +[type='checkbox']:indeterminate:hover,[type='checkbox']:indeterminate:focus { + border-color: transparent; + background-color: currentColor; +} + +[type='file'] { + background: unset; + border-color: inherit; + border-width: 0; + border-radius: 0; + padding: 0; + font-size: unset; + line-height: inherit; +} + +[type='file']:focus { + outline: 1px auto inherit; +} + +input[type=file]::file-selector-button { + color: white; + background: #1F2937; + border: 0; + font-weight: 500; + font-size: 0.875rem; + cursor: pointer; + padding-top: 0.625rem; + padding-bottom: 0.625rem; + padding-left: 2rem; + padding-right: 1rem; + margin-inline-start: -1rem; + margin-inline-end: 1rem; +} + +input[type=file]::file-selector-button:hover { + background: #374151; +} + +:is([dir=rtl]) input[type=file]::file-selector-button { + padding-right: 2rem; + padding-left: 1rem; +} + +.dark input[type=file]::file-selector-button { + color: white; + background: #4B5563; +} + +.dark input[type=file]::file-selector-button:hover { + background: #6B7280; +} + +input[type="range"]::-webkit-slider-thumb { + height: 1.25rem; + width: 1.25rem; + background: #1C64F2; + border-radius: 9999px; + border: 0; + appearance: none; + -moz-appearance: none; + -webkit-appearance: none; + cursor: pointer; +} + +input[type="range"]:disabled::-webkit-slider-thumb { + background: #9CA3AF; +} + +.dark input[type="range"]:disabled::-webkit-slider-thumb { + background: #6B7280; +} + +input[type="range"]:focus::-webkit-slider-thumb { + outline: 2px solid transparent; + outline-offset: 2px; + --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color); + --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(4px + var(--tw-ring-offset-width)) var(--tw-ring-color); + 
box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000); + --tw-ring-opacity: 1px; + --tw-ring-color: rgb(164 202 254 / var(--tw-ring-opacity)); +} + +input[type="range"]::-moz-range-thumb { + height: 1.25rem; + width: 1.25rem; + background: #1C64F2; + border-radius: 9999px; + border: 0; + appearance: none; + -moz-appearance: none; + -webkit-appearance: none; + cursor: pointer; +} + +input[type="range"]:disabled::-moz-range-thumb { + background: #9CA3AF; +} + +.dark input[type="range"]:disabled::-moz-range-thumb { + background: #6B7280; +} + +input[type="range"]::-moz-range-progress { + background: #3F83F8; +} + +input[type="range"]::-ms-fill-lower { + background: #3F83F8; +} + +*, ::before, ::after { + --tw-border-spacing-x: 0; + --tw-border-spacing-y: 0; + --tw-translate-x: 0; + --tw-translate-y: 0; + --tw-rotate: 0; + --tw-skew-x: 0; + --tw-skew-y: 0; + --tw-scale-x: 1; + --tw-scale-y: 1; + --tw-pan-x: ; + --tw-pan-y: ; + --tw-pinch-zoom: ; + --tw-scroll-snap-strictness: proximity; + --tw-gradient-from-position: ; + --tw-gradient-via-position: ; + --tw-gradient-to-position: ; + --tw-ordinal: ; + --tw-slashed-zero: ; + --tw-numeric-figure: ; + --tw-numeric-spacing: ; + --tw-numeric-fraction: ; + --tw-ring-inset: ; + --tw-ring-offset-width: 0px; + --tw-ring-offset-color: #fff; + --tw-ring-color: rgb(63 131 248 / 0.5); + --tw-ring-offset-shadow: 0 0 #0000; + --tw-ring-shadow: 0 0 #0000; + --tw-shadow: 0 0 #0000; + --tw-shadow-colored: 0 0 #0000; + --tw-blur: ; + --tw-brightness: ; + --tw-contrast: ; + --tw-grayscale: ; + --tw-hue-rotate: ; + --tw-invert: ; + --tw-saturate: ; + --tw-sepia: ; + --tw-drop-shadow: ; + --tw-backdrop-blur: ; + --tw-backdrop-brightness: ; + --tw-backdrop-contrast: ; + --tw-backdrop-grayscale: ; + --tw-backdrop-hue-rotate: ; + --tw-backdrop-invert: ; + --tw-backdrop-opacity: ; + --tw-backdrop-saturate: ; + --tw-backdrop-sepia: ; +} + +::backdrop { + --tw-border-spacing-x: 0; + --tw-border-spacing-y: 0; 
+ --tw-translate-x: 0; + --tw-translate-y: 0; + --tw-rotate: 0; + --tw-skew-x: 0; + --tw-skew-y: 0; + --tw-scale-x: 1; + --tw-scale-y: 1; + --tw-pan-x: ; + --tw-pan-y: ; + --tw-pinch-zoom: ; + --tw-scroll-snap-strictness: proximity; + --tw-gradient-from-position: ; + --tw-gradient-via-position: ; + --tw-gradient-to-position: ; + --tw-ordinal: ; + --tw-slashed-zero: ; + --tw-numeric-figure: ; + --tw-numeric-spacing: ; + --tw-numeric-fraction: ; + --tw-ring-inset: ; + --tw-ring-offset-width: 0px; + --tw-ring-offset-color: #fff; + --tw-ring-color: rgb(63 131 248 / 0.5); + --tw-ring-offset-shadow: 0 0 #0000; + --tw-ring-shadow: 0 0 #0000; + --tw-shadow: 0 0 #0000; + --tw-shadow-colored: 0 0 #0000; + --tw-blur: ; + --tw-brightness: ; + --tw-contrast: ; + --tw-grayscale: ; + --tw-hue-rotate: ; + --tw-invert: ; + --tw-saturate: ; + --tw-sepia: ; + --tw-drop-shadow: ; + --tw-backdrop-blur: ; + --tw-backdrop-brightness: ; + --tw-backdrop-contrast: ; + --tw-backdrop-grayscale: ; + --tw-backdrop-hue-rotate: ; + --tw-backdrop-invert: ; + --tw-backdrop-opacity: ; + --tw-backdrop-saturate: ; + --tw-backdrop-sepia: ; +} + +.container { + width: 100%; +} + +@media (min-width: 640px) { + .container { + max-width: 640px; + } +} + +@media (min-width: 768px) { + .container { + max-width: 768px; + } +} + +@media (min-width: 1024px) { + .container { + max-width: 1024px; + } +} + +@media (min-width: 1280px) { + .container { + max-width: 1280px; + } +} + +@media (min-width: 1536px) { + .container { + max-width: 1536px; + } +} + +.sr-only { + position: absolute; + width: 1px; + height: 1px; + padding: 0; + margin: -1px; + overflow: hidden; + clip: rect(0, 0, 0, 0); + white-space: nowrap; + border-width: 0; +} + +.pointer-events-none { + pointer-events: none; +} + +.static { + position: static; +} + +.fixed { + position: fixed; +} + +.absolute { + position: absolute; +} + +.relative { + position: relative; +} + +.inset-y-0 { + top: 0px; + bottom: 0px; +} + +.-left-3 { + left: -0.75rem; +} + 
+.bottom-6 { + bottom: 1.5rem; +} + +.end-2 { + inset-inline-end: 0.5rem; +} + +.end-2\.5 { + inset-inline-end: 0.625rem; +} + +.left-0 { + left: 0px; +} + +.right-0 { + right: 0px; +} + +.start-0 { + inset-inline-start: 0px; +} + +.top-0 { + top: 0px; +} + +.top-12 { + top: 3rem; +} + +.top-3 { + top: 0.75rem; +} + +.z-10 { + z-index: 10; +} + +.z-20 { + z-index: 20; +} + +.z-40 { + z-index: 40; +} + +.z-50 { + z-index: 50; +} + +.mx-1 { + margin-left: 0.25rem; + margin-right: 0.25rem; +} + +.mx-2 { + margin-left: 0.5rem; + margin-right: 0.5rem; +} + +.mx-auto { + margin-left: auto; + margin-right: auto; +} + +.my-2 { + margin-top: 0.5rem; + margin-bottom: 0.5rem; +} + +.my-4 { + margin-top: 1rem; + margin-bottom: 1rem; +} + +.my-8 { + margin-top: 2rem; + margin-bottom: 2rem; +} + +.-mb-px { + margin-bottom: -1px; +} + +.mb-1 { + margin-bottom: 0.25rem; +} + +.mb-2 { + margin-bottom: 0.5rem; +} + +.mb-4 { + margin-bottom: 1rem; +} + +.mb-5 { + margin-bottom: 1.25rem; +} + +.mb-6 { + margin-bottom: 1.5rem; +} + +.mb-8 { + margin-bottom: 2rem; +} + +.me-2 { + margin-inline-end: 0.5rem; +} + +.me-3 { + margin-inline-end: 0.75rem; +} + +.ml-2 { + margin-left: 0.5rem; +} + +.ml-3 { + margin-left: 0.75rem; +} + +.ml-4 { + margin-left: 1rem; +} + +.ml-8 { + margin-left: 2rem; +} + +.mr-1 { + margin-right: 0.25rem; +} + +.mr-10 { + margin-right: 2.5rem; +} + +.mr-2 { + margin-right: 0.5rem; +} + +.mr-2\.5 { + margin-right: 0.625rem; +} + +.mr-3 { + margin-right: 0.75rem; +} + +.mr-4 { + margin-right: 1rem; +} + +.mr-8 { + margin-right: 2rem; +} + +.ms-3 { + margin-inline-start: 0.75rem; +} + +.ms-auto { + margin-inline-start: auto; +} + +.mt-1 { + margin-top: 0.25rem; +} + +.mt-2 { + margin-top: 0.5rem; +} + +.mt-4 { + margin-top: 1rem; +} + +.mt-5 { + margin-top: 1.25rem; +} + +.mt-8 { + margin-top: 2rem; +} + +.block { + display: block; +} + +.inline-block { + display: inline-block; +} + +.inline { + display: inline; +} + +.flex { + display: flex; +} + +.inline-flex { + 
display: inline-flex; +} + +.grid { + display: grid; +} + +.\!hidden { + display: none !important; +} + +.hidden { + display: none; +} + +.h-1 { + height: 0.25rem; +} + +.h-1\.5 { + height: 0.375rem; +} + +.h-10 { + height: 2.5rem; +} + +.h-12 { + height: 3rem; +} + +.h-2 { + height: 0.5rem; +} + +.h-2\.5 { + height: 0.625rem; +} + +.h-3 { + height: 0.75rem; +} + +.h-32 { + height: 8rem; +} + +.h-4 { + height: 1rem; +} + +.h-40 { + height: 10rem; +} + +.h-48 { + height: 12rem; +} + +.h-5 { + height: 1.25rem; +} + +.h-6 { + height: 1.5rem; +} + +.h-8 { + height: 2rem; +} + +.h-9 { + height: 2.25rem; +} + +.h-full { + height: 100%; +} + +.h-screen { + height: 100vh; +} + +.max-h-full { + max-height: 100%; +} + +.max-h-screen { + max-height: 100vh; +} + +.w-1\/2 { + width: 50%; +} + +.w-1\/3 { + width: 33.333333%; +} + +.w-1\/4 { + width: 25%; +} + +.w-1\/6 { + width: 16.666667%; +} + +.w-10 { + width: 2.5rem; +} + +.w-11 { + width: 2.75rem; +} + +.w-12 { + width: 3rem; +} + +.w-2 { + width: 0.5rem; +} + +.w-2\.5 { + width: 0.625rem; +} + +.w-2\/3 { + width: 66.666667%; +} + +.w-3 { + width: 0.75rem; +} + +.w-3\.5 { + width: 0.875rem; +} + +.w-3\/6 { + width: 50%; +} + +.w-36 { + width: 9rem; +} + +.w-4 { + width: 1rem; +} + +.w-44 { + width: 11rem; +} + +.w-48 { + width: 12rem; +} + +.w-5 { + width: 1.25rem; +} + +.w-5\/6 { + width: 83.333333%; +} + +.w-56 { + width: 14rem; +} + +.w-6 { + width: 1.5rem; +} + +.w-64 { + width: 16rem; +} + +.w-8 { + width: 2rem; +} + +.w-9 { + width: 2.25rem; +} + +.w-96 { + width: 24rem; +} + +.w-\[calc\(100\%-16rem\)\] { + width: calc(100% - 16rem); +} + +.w-full { + width: 100%; +} + +.min-w-0 { + min-width: 0px; +} + +.max-w-2xl { + max-width: 42rem; +} + +.max-w-48 { + max-width: 12rem; +} + +.max-w-96 { + max-width: 24rem; +} + +.max-w-md { + max-width: 28rem; +} + +.flex-1 { + flex: 1 1 0%; +} + +.flex-shrink-0 { + flex-shrink: 0; +} + +.grow { + flex-grow: 1; +} + +.basis-0 { + flex-basis: 0px; +} + +.-translate-x-full { + 
--tw-translate-x: -100%; + transform: translate(var(--tw-translate-x), var(--tw-translate-y)) rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y)); +} + +@keyframes spin { + to { + transform: rotate(360deg); + } +} + +.animate-spin { + animation: spin 1s linear infinite; +} + +.cursor-pointer { + cursor: pointer; +} + +.list-none { + list-style-type: none; +} + +.grid-cols-4 { + grid-template-columns: repeat(4, minmax(0, 1fr)); +} + +.grid-cols-7 { + grid-template-columns: repeat(7, minmax(0, 1fr)); +} + +.flex-row { + flex-direction: row; +} + +.flex-col { + flex-direction: column; +} + +.flex-wrap { + flex-wrap: wrap; +} + +.items-start { + align-items: flex-start; +} + +.items-end { + align-items: flex-end; +} + +.items-center { + align-items: center; +} + +.justify-start { + justify-content: flex-start; +} + +.justify-end { + justify-content: flex-end; +} + +.justify-center { + justify-content: center; +} + +.justify-between { + justify-content: space-between; +} + +.gap-y-3 { + row-gap: 0.75rem; +} + +.gap-y-6 { + row-gap: 1.5rem; +} + +.gap-y-8 { + row-gap: 2rem; +} + +.space-x-2 > :not([hidden]) ~ :not([hidden]) { + --tw-space-x-reverse: 0; + margin-right: calc(0.5rem * var(--tw-space-x-reverse)); + margin-left: calc(0.5rem * calc(1 - var(--tw-space-x-reverse))); +} + +.space-x-8 > :not([hidden]) ~ :not([hidden]) { + --tw-space-x-reverse: 0; + margin-right: calc(2rem * var(--tw-space-x-reverse)); + margin-left: calc(2rem * calc(1 - var(--tw-space-x-reverse))); +} + +.space-y-2 > :not([hidden]) ~ :not([hidden]) { + --tw-space-y-reverse: 0; + margin-top: calc(0.5rem * calc(1 - var(--tw-space-y-reverse))); + margin-bottom: calc(0.5rem * var(--tw-space-y-reverse)); +} + +.space-y-3 > :not([hidden]) ~ :not([hidden]) { + --tw-space-y-reverse: 0; + margin-top: calc(0.75rem * calc(1 - var(--tw-space-y-reverse))); + margin-bottom: calc(0.75rem * var(--tw-space-y-reverse)); +} + +.space-y-4 > 
:not([hidden]) ~ :not([hidden]) { + --tw-space-y-reverse: 0; + margin-top: calc(1rem * calc(1 - var(--tw-space-y-reverse))); + margin-bottom: calc(1rem * var(--tw-space-y-reverse)); +} + +.space-y-8 > :not([hidden]) ~ :not([hidden]) { + --tw-space-y-reverse: 0; + margin-top: calc(2rem * calc(1 - var(--tw-space-y-reverse))); + margin-bottom: calc(2rem * var(--tw-space-y-reverse)); +} + +.divide-y > :not([hidden]) ~ :not([hidden]) { + --tw-divide-y-reverse: 0; + border-top-width: calc(1px * calc(1 - var(--tw-divide-y-reverse))); + border-bottom-width: calc(1px * var(--tw-divide-y-reverse)); +} + +.divide-gray-100 > :not([hidden]) ~ :not([hidden]) { + --tw-divide-opacity: 1; + border-color: rgb(243 244 246 / var(--tw-divide-opacity)); +} + +.self-center { + align-self: center; +} + +.justify-self-start { + justify-self: start; +} + +.overflow-y-auto { + overflow-y: auto; +} + +.overflow-x-hidden { + overflow-x: hidden; +} + +.truncate { + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; +} + +.whitespace-nowrap { + white-space: nowrap; +} + +.text-nowrap { + text-wrap: nowrap; +} + +.rounded { + border-radius: 0.25rem; +} + +.rounded-full { + border-radius: 9999px; +} + +.rounded-lg { + border-radius: 0.5rem; +} + +.rounded-xl { + border-radius: 0.75rem; +} + +.rounded-b { + border-bottom-right-radius: 0.25rem; + border-bottom-left-radius: 0.25rem; +} + +.rounded-e-lg { + border-start-end-radius: 0.5rem; + border-end-end-radius: 0.5rem; +} + +.rounded-l-lg { + border-top-left-radius: 0.5rem; + border-bottom-left-radius: 0.5rem; +} + +.rounded-r-lg { + border-top-right-radius: 0.5rem; + border-bottom-right-radius: 0.5rem; +} + +.rounded-s-lg { + border-start-start-radius: 0.5rem; + border-end-start-radius: 0.5rem; +} + +.rounded-t { + border-top-left-radius: 0.25rem; + border-top-right-radius: 0.25rem; +} + +.rounded-t-lg { + border-top-left-radius: 0.5rem; + border-top-right-radius: 0.5rem; +} + +.border { + border-width: 1px; +} + +.border-0 { + 
border-width: 0px; +} + +.border-b { + border-bottom-width: 1px; +} + +.border-b-2 { + border-bottom-width: 2px; +} + +.border-r { + border-right-width: 1px; +} + +.border-t { + border-top-width: 1px; +} + +.border-blue-600 { + --tw-border-opacity: 1; + border-color: rgb(28 100 242 / var(--tw-border-opacity)); +} + +.border-gray-200 { + --tw-border-opacity: 1; + border-color: rgb(229 231 235 / var(--tw-border-opacity)); +} + +.border-gray-300 { + --tw-border-opacity: 1; + border-color: rgb(209 213 219 / var(--tw-border-opacity)); +} + +.border-transparent { + border-color: transparent; +} + +.bg-blue-700 { + --tw-bg-opacity: 1; + background-color: rgb(26 86 219 / var(--tw-bg-opacity)); +} + +.bg-gray-100 { + --tw-bg-opacity: 1; + background-color: rgb(243 244 246 / var(--tw-bg-opacity)); +} + +.bg-gray-200 { + --tw-bg-opacity: 1; + background-color: rgb(229 231 235 / var(--tw-bg-opacity)); +} + +.bg-gray-50 { + --tw-bg-opacity: 1; + background-color: rgb(249 250 251 / var(--tw-bg-opacity)); +} + +.bg-gray-900\/50 { + background-color: rgb(17 24 39 / 0.5); +} + +.bg-green-100 { + --tw-bg-opacity: 1; + background-color: rgb(222 247 236 / var(--tw-bg-opacity)); +} + +.bg-green-200 { + --tw-bg-opacity: 1; + background-color: rgb(188 240 218 / var(--tw-bg-opacity)); +} + +.bg-green-50 { + --tw-bg-opacity: 1; + background-color: rgb(243 250 247 / var(--tw-bg-opacity)); +} + +.bg-green-600 { + --tw-bg-opacity: 1; + background-color: rgb(5 122 85 / var(--tw-bg-opacity)); +} + +.bg-green-700 { + --tw-bg-opacity: 1; + background-color: rgb(4 108 78 / var(--tw-bg-opacity)); +} + +.bg-red-100 { + --tw-bg-opacity: 1; + background-color: rgb(253 232 232 / var(--tw-bg-opacity)); +} + +.bg-red-50 { + --tw-bg-opacity: 1; + background-color: rgb(253 242 242 / var(--tw-bg-opacity)); +} + +.bg-red-600 { + --tw-bg-opacity: 1; + background-color: rgb(224 36 36 / var(--tw-bg-opacity)); +} + +.bg-red-700 { + --tw-bg-opacity: 1; + background-color: rgb(200 30 30 / var(--tw-bg-opacity)); +} 
+ +.bg-transparent { + background-color: transparent; +} + +.bg-white { + --tw-bg-opacity: 1; + background-color: rgb(255 255 255 / var(--tw-bg-opacity)); +} + +.bg-yellow-100 { + --tw-bg-opacity: 1; + background-color: rgb(253 246 178 / var(--tw-bg-opacity)); +} + +.bg-yellow-400 { + --tw-bg-opacity: 1; + background-color: rgb(227 160 8 / var(--tw-bg-opacity)); +} + +.bg-gradient-to-tr { + background-image: linear-gradient(to top right, var(--tw-gradient-stops)); +} + +.from-sky-500 { + --tw-gradient-from: #0ea5e9 var(--tw-gradient-from-position); + --tw-gradient-to: rgb(14 165 233 / 0) var(--tw-gradient-to-position); + --tw-gradient-stops: var(--tw-gradient-from), var(--tw-gradient-to); +} + +.from-30\% { + --tw-gradient-from-position: 30%; +} + +.to-indigo-500 { + --tw-gradient-to: #6875F5 var(--tw-gradient-to-position); +} + +.to-70\% { + --tw-gradient-to-position: 70%; +} + +.object-contain { + -o-object-fit: contain; + object-fit: contain; +} + +.object-left { + -o-object-position: left; + object-position: left; +} + +.p-1 { + padding: 0.25rem; +} + +.p-2 { + padding: 0.5rem; +} + +.p-2\.5 { + padding: 0.625rem; +} + +.p-4 { + padding: 1rem; +} + +.p-6 { + padding: 1.5rem; +} + +.p-8 { + padding: 2rem; +} + +.px-1 { + padding-left: 0.25rem; + padding-right: 0.25rem; +} + +.px-2 { + padding-left: 0.5rem; + padding-right: 0.5rem; +} + +.px-2\.5 { + padding-left: 0.625rem; + padding-right: 0.625rem; +} + +.px-3 { + padding-left: 0.75rem; + padding-right: 0.75rem; +} + +.px-4 { + padding-left: 1rem; + padding-right: 1rem; +} + +.px-5 { + padding-left: 1.25rem; + padding-right: 1.25rem; +} + +.px-6 { + padding-left: 1.5rem; + padding-right: 1.5rem; +} + +.px-8 { + padding-left: 2rem; + padding-right: 2rem; +} + +.py-0 { + padding-top: 0px; + padding-bottom: 0px; +} + +.py-0\.5 { + padding-top: 0.125rem; + padding-bottom: 0.125rem; +} + +.py-1 { + padding-top: 0.25rem; + padding-bottom: 0.25rem; +} + +.py-2 { + padding-top: 0.5rem; + padding-bottom: 0.5rem; +} + 
+.py-2\.5 { + padding-top: 0.625rem; + padding-bottom: 0.625rem; +} + +.py-3 { + padding-top: 0.75rem; + padding-bottom: 0.75rem; +} + +.py-4 { + padding-top: 1rem; + padding-bottom: 1rem; +} + +.py-5 { + padding-top: 1.25rem; + padding-bottom: 1.25rem; +} + +.py-8 { + padding-top: 2rem; + padding-bottom: 2rem; +} + +.pb-20 { + padding-bottom: 5rem; +} + +.pb-4 { + padding-bottom: 1rem; +} + +.pb-8 { + padding-bottom: 2rem; +} + +.pl-10 { + padding-left: 2.5rem; +} + +.pl-2 { + padding-left: 0.5rem; +} + +.pl-3 { + padding-left: 0.75rem; +} + +.pr-2 { + padding-right: 0.5rem; +} + +.pr-6 { + padding-right: 1.5rem; +} + +.ps-10 { + padding-inline-start: 2.5rem; +} + +.ps-3 { + padding-inline-start: 0.75rem; +} + +.pt-2 { + padding-top: 0.5rem; +} + +.pt-5 { + padding-top: 1.25rem; +} + +.pt-8 { + padding-top: 2rem; +} + +.text-left { + text-align: left; +} + +.text-center { + text-align: center; +} + +.text-2xl { + font-size: 1.5rem; + line-height: 2rem; +} + +.text-base { + font-size: 1rem; + line-height: 1.5rem; +} + +.text-lg { + font-size: 1.125rem; + line-height: 1.75rem; +} + +.text-sm { + font-size: 0.875rem; + line-height: 1.25rem; +} + +.text-xl { + font-size: 1.25rem; + line-height: 1.75rem; +} + +.font-bold { + font-weight: 700; +} + +.font-medium { + font-weight: 500; +} + +.font-normal { + font-weight: 400; +} + +.font-semibold { + font-weight: 600; +} + +.leading-6 { + line-height: 1.5rem; +} + +.leading-9 { + line-height: 2.25rem; +} + +.leading-tight { + line-height: 1.25; +} + +.tracking-tight { + letter-spacing: -0.025em; +} + +.text-blue-600 { + --tw-text-opacity: 1; + color: rgb(28 100 242 / var(--tw-text-opacity)); +} + +.text-blue-700 { + --tw-text-opacity: 1; + color: rgb(26 86 219 / var(--tw-text-opacity)); +} + +.text-gray-300 { + --tw-text-opacity: 1; + color: rgb(209 213 219 / var(--tw-text-opacity)); +} + +.text-gray-400 { + --tw-text-opacity: 1; + color: rgb(156 163 175 / var(--tw-text-opacity)); +} + +.text-gray-500 { + 
--tw-text-opacity: 1; + color: rgb(107 114 128 / var(--tw-text-opacity)); +} + +.text-gray-600 { + --tw-text-opacity: 1; + color: rgb(75 85 99 / var(--tw-text-opacity)); +} + +.text-gray-700 { + --tw-text-opacity: 1; + color: rgb(55 65 81 / var(--tw-text-opacity)); +} + +.text-gray-800 { + --tw-text-opacity: 1; + color: rgb(31 41 55 / var(--tw-text-opacity)); +} + +.text-gray-900 { + --tw-text-opacity: 1; + color: rgb(17 24 39 / var(--tw-text-opacity)); +} + +.text-green-800 { + --tw-text-opacity: 1; + color: rgb(3 84 63 / var(--tw-text-opacity)); +} + +.text-red-500 { + --tw-text-opacity: 1; + color: rgb(240 82 82 / var(--tw-text-opacity)); +} + +.text-red-700 { + --tw-text-opacity: 1; + color: rgb(200 30 30 / var(--tw-text-opacity)); +} + +.text-red-800 { + --tw-text-opacity: 1; + color: rgb(155 28 28 / var(--tw-text-opacity)); +} + +.text-white { + --tw-text-opacity: 1; + color: rgb(255 255 255 / var(--tw-text-opacity)); +} + +.text-yellow-500 { + --tw-text-opacity: 1; + color: rgb(194 120 3 / var(--tw-text-opacity)); +} + +.text-yellow-800 { + --tw-text-opacity: 1; + color: rgb(114 59 19 / var(--tw-text-opacity)); +} + +.shadow { + --tw-shadow: 0 1px 3px 0 rgb(0 0 0 / 0.1), 0 1px 2px -1px rgb(0 0 0 / 0.1); + --tw-shadow-colored: 0 1px 3px 0 var(--tw-shadow-color), 0 1px 2px -1px var(--tw-shadow-color); + box-shadow: var(--tw-ring-offset-shadow, 0 0 #0000), var(--tw-ring-shadow, 0 0 #0000), var(--tw-shadow); +} + +.shadow-lg { + --tw-shadow: 0 10px 15px -3px rgb(0 0 0 / 0.1), 0 4px 6px -4px rgb(0 0 0 / 0.1); + --tw-shadow-colored: 0 10px 15px -3px var(--tw-shadow-color), 0 4px 6px -4px var(--tw-shadow-color); + box-shadow: var(--tw-ring-offset-shadow, 0 0 #0000), var(--tw-ring-shadow, 0 0 #0000), var(--tw-shadow); +} + +.shadow-sm { + --tw-shadow: 0 1px 2px 0 rgb(0 0 0 / 0.05); + --tw-shadow-colored: 0 1px 2px 0 var(--tw-shadow-color); + box-shadow: var(--tw-ring-offset-shadow, 0 0 #0000), var(--tw-ring-shadow, 0 0 #0000), var(--tw-shadow); +} + +.\!filter { + 
filter: var(--tw-blur) var(--tw-brightness) var(--tw-contrast) var(--tw-grayscale) var(--tw-hue-rotate) var(--tw-invert) var(--tw-saturate) var(--tw-sepia) var(--tw-drop-shadow) !important; +} + +.filter { + filter: var(--tw-blur) var(--tw-brightness) var(--tw-contrast) var(--tw-grayscale) var(--tw-hue-rotate) var(--tw-invert) var(--tw-saturate) var(--tw-sepia) var(--tw-drop-shadow); +} + +.transition { + transition-property: color, background-color, border-color, text-decoration-color, fill, stroke, opacity, box-shadow, transform, filter, -webkit-backdrop-filter; + transition-property: color, background-color, border-color, text-decoration-color, fill, stroke, opacity, box-shadow, transform, filter, backdrop-filter; + transition-property: color, background-color, border-color, text-decoration-color, fill, stroke, opacity, box-shadow, transform, filter, backdrop-filter, -webkit-backdrop-filter; + transition-timing-function: cubic-bezier(0.4, 0, 0.2, 1); + transition-duration: 150ms; +} + +.transition-transform { + transition-property: transform; + transition-timing-function: cubic-bezier(0.4, 0, 0.2, 1); + transition-duration: 150ms; +} + +.duration-75 { + transition-duration: 75ms; +} + +.after\:absolute::after { + content: var(--tw-content); + position: absolute; +} + +.after\:start-\[2px\]::after { + content: var(--tw-content); + inset-inline-start: 2px; +} + +.after\:top-\[2px\]::after { + content: var(--tw-content); + top: 2px; +} + +.after\:h-5::after { + content: var(--tw-content); + height: 1.25rem; +} + +.after\:w-5::after { + content: var(--tw-content); + width: 1.25rem; +} + +.after\:rounded-full::after { + content: var(--tw-content); + border-radius: 9999px; +} + +.after\:border::after { + content: var(--tw-content); + border-width: 1px; +} + +.after\:border-gray-300::after { + content: var(--tw-content); + --tw-border-opacity: 1; + border-color: rgb(209 213 219 / var(--tw-border-opacity)); +} + +.after\:bg-white::after { + content: var(--tw-content); + 
--tw-bg-opacity: 1; + background-color: rgb(255 255 255 / var(--tw-bg-opacity)); +} + +.after\:transition-all::after { + content: var(--tw-content); + transition-property: all; + transition-timing-function: cubic-bezier(0.4, 0, 0.2, 1); + transition-duration: 150ms; +} + +.after\:content-\[\'\'\]::after { + --tw-content: ''; + content: var(--tw-content); +} + +.hover\:border-gray-300:hover { + --tw-border-opacity: 1; + border-color: rgb(209 213 219 / var(--tw-border-opacity)); +} + +.hover\:bg-blue-800:hover { + --tw-bg-opacity: 1; + background-color: rgb(30 66 159 / var(--tw-bg-opacity)); +} + +.hover\:bg-gray-100:hover { + --tw-bg-opacity: 1; + background-color: rgb(243 244 246 / var(--tw-bg-opacity)); +} + +.hover\:bg-gray-200:hover { + --tw-bg-opacity: 1; + background-color: rgb(229 231 235 / var(--tw-bg-opacity)); +} + +.hover\:bg-green-800:hover { + --tw-bg-opacity: 1; + background-color: rgb(3 84 63 / var(--tw-bg-opacity)); +} + +.hover\:bg-red-800:hover { + --tw-bg-opacity: 1; + background-color: rgb(155 28 28 / var(--tw-bg-opacity)); +} + +.hover\:text-blue-700:hover { + --tw-text-opacity: 1; + color: rgb(26 86 219 / var(--tw-text-opacity)); +} + +.hover\:text-gray-600:hover { + --tw-text-opacity: 1; + color: rgb(75 85 99 / var(--tw-text-opacity)); +} + +.hover\:text-gray-900:hover { + --tw-text-opacity: 1; + color: rgb(17 24 39 / var(--tw-text-opacity)); +} + +.focus\:z-10:focus { + z-index: 10; +} + +.focus\:border-blue-500:focus { + --tw-border-opacity: 1; + border-color: rgb(63 131 248 / var(--tw-border-opacity)); +} + +.focus\:bg-gray-100:focus { + --tw-bg-opacity: 1; + background-color: rgb(243 244 246 / var(--tw-bg-opacity)); +} + +.focus\:outline-none:focus { + outline: 2px solid transparent; + outline-offset: 2px; +} + +.focus\:ring-2:focus { + --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color); + --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(2px + var(--tw-ring-offset-width)) 
var(--tw-ring-color); + box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000); +} + +.focus\:ring-4:focus { + --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color); + --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(4px + var(--tw-ring-offset-width)) var(--tw-ring-color); + box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000); +} + +.focus\:ring-blue-300:focus { + --tw-ring-opacity: 1; + --tw-ring-color: rgb(164 202 254 / var(--tw-ring-opacity)); +} + +.focus\:ring-blue-500:focus { + --tw-ring-opacity: 1; + --tw-ring-color: rgb(63 131 248 / var(--tw-ring-opacity)); +} + +.focus\:ring-gray-100:focus { + --tw-ring-opacity: 1; + --tw-ring-color: rgb(243 244 246 / var(--tw-ring-opacity)); +} + +.focus\:ring-gray-200:focus { + --tw-ring-opacity: 1; + --tw-ring-color: rgb(229 231 235 / var(--tw-ring-opacity)); +} + +.focus\:ring-gray-300:focus { + --tw-ring-opacity: 1; + --tw-ring-color: rgb(209 213 219 / var(--tw-ring-opacity)); +} + +.focus\:ring-green-300:focus { + --tw-ring-opacity: 1; + --tw-ring-color: rgb(132 225 188 / var(--tw-ring-opacity)); +} + +.focus\:ring-red-300:focus { + --tw-ring-opacity: 1; + --tw-ring-color: rgb(248 180 180 / var(--tw-ring-opacity)); +} + +.disabled\:bg-red-400:disabled { + --tw-bg-opacity: 1; + background-color: rgb(249 128 128 / var(--tw-bg-opacity)); +} + +.peer:checked ~ .peer-checked\:bg-blue-600 { + --tw-bg-opacity: 1; + background-color: rgb(28 100 242 / var(--tw-bg-opacity)); +} + +.peer:checked ~ .peer-checked\:after\:translate-x-full::after { + content: var(--tw-content); + --tw-translate-x: 100%; + transform: translate(var(--tw-translate-x), var(--tw-translate-y)) rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y)); +} + +.peer:checked ~ .peer-checked\:after\:border-white::after { + content: var(--tw-content); + 
--tw-border-opacity: 1; + border-color: rgb(255 255 255 / var(--tw-border-opacity)); +} + +.peer:focus ~ .peer-focus\:outline-none { + outline: 2px solid transparent; + outline-offset: 2px; +} + +.peer:focus ~ .peer-focus\:ring-4 { + --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color); + --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(4px + var(--tw-ring-offset-width)) var(--tw-ring-color); + box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000); +} + +.peer:focus ~ .peer-focus\:ring-blue-300 { + --tw-ring-opacity: 1; + --tw-ring-color: rgb(164 202 254 / var(--tw-ring-opacity)); +} + +:is(.dark .dark\:divide-gray-600) > :not([hidden]) ~ :not([hidden]) { + --tw-divide-opacity: 1; + border-color: rgb(75 85 99 / var(--tw-divide-opacity)); +} + +:is(.dark .dark\:border) { + border-width: 1px; +} + +:is(.dark .dark\:border-blue-500) { + --tw-border-opacity: 1; + border-color: rgb(63 131 248 / var(--tw-border-opacity)); +} + +:is(.dark .dark\:border-gray-500) { + --tw-border-opacity: 1; + border-color: rgb(107 114 128 / var(--tw-border-opacity)); +} + +:is(.dark .dark\:border-gray-600) { + --tw-border-opacity: 1; + border-color: rgb(75 85 99 / var(--tw-border-opacity)); +} + +:is(.dark .dark\:border-gray-700) { + --tw-border-opacity: 1; + border-color: rgb(55 65 81 / var(--tw-border-opacity)); +} + +:is(.dark .dark\:bg-blue-600) { + --tw-bg-opacity: 1; + background-color: rgb(28 100 242 / var(--tw-bg-opacity)); +} + +:is(.dark .dark\:bg-gray-600) { + --tw-bg-opacity: 1; + background-color: rgb(75 85 99 / var(--tw-bg-opacity)); +} + +:is(.dark .dark\:bg-gray-700) { + --tw-bg-opacity: 1; + background-color: rgb(55 65 81 / var(--tw-bg-opacity)); +} + +:is(.dark .dark\:bg-gray-800) { + --tw-bg-opacity: 1; + background-color: rgb(31 41 55 / var(--tw-bg-opacity)); +} + +:is(.dark .dark\:bg-gray-900) { + --tw-bg-opacity: 1; + background-color: rgb(17 24 39 / var(--tw-bg-opacity)); 
+} + +:is(.dark .dark\:bg-gray-900\/80) { + background-color: rgb(17 24 39 / 0.8); +} + +:is(.dark .dark\:bg-green-600) { + --tw-bg-opacity: 1; + background-color: rgb(5 122 85 / var(--tw-bg-opacity)); +} + +:is(.dark .dark\:bg-green-700) { + --tw-bg-opacity: 1; + background-color: rgb(4 108 78 / var(--tw-bg-opacity)); +} + +:is(.dark .dark\:bg-green-900) { + --tw-bg-opacity: 1; + background-color: rgb(1 71 55 / var(--tw-bg-opacity)); +} + +:is(.dark .dark\:bg-red-600) { + --tw-bg-opacity: 1; + background-color: rgb(224 36 36 / var(--tw-bg-opacity)); +} + +:is(.dark .dark\:bg-red-900) { + --tw-bg-opacity: 1; + background-color: rgb(119 29 29 / var(--tw-bg-opacity)); +} + +:is(.dark .dark\:bg-yellow-900) { + --tw-bg-opacity: 1; + background-color: rgb(99 49 18 / var(--tw-bg-opacity)); +} + +:is(.dark .dark\:text-blue-500) { + --tw-text-opacity: 1; + color: rgb(63 131 248 / var(--tw-text-opacity)); +} + +:is(.dark .dark\:text-gray-200) { + --tw-text-opacity: 1; + color: rgb(229 231 235 / var(--tw-text-opacity)); +} + +:is(.dark .dark\:text-gray-300) { + --tw-text-opacity: 1; + color: rgb(209 213 219 / var(--tw-text-opacity)); +} + +:is(.dark .dark\:text-gray-400) { + --tw-text-opacity: 1; + color: rgb(156 163 175 / var(--tw-text-opacity)); +} + +:is(.dark .dark\:text-gray-500) { + --tw-text-opacity: 1; + color: rgb(107 114 128 / var(--tw-text-opacity)); +} + +:is(.dark .dark\:text-green-300) { + --tw-text-opacity: 1; + color: rgb(132 225 188 / var(--tw-text-opacity)); +} + +:is(.dark .dark\:text-green-400) { + --tw-text-opacity: 1; + color: rgb(49 196 141 / var(--tw-text-opacity)); +} + +:is(.dark .dark\:text-red-300) { + --tw-text-opacity: 1; + color: rgb(248 180 180 / var(--tw-text-opacity)); +} + +:is(.dark .dark\:text-red-400) { + --tw-text-opacity: 1; + color: rgb(249 128 128 / var(--tw-text-opacity)); +} + +:is(.dark .dark\:text-white) { + --tw-text-opacity: 1; + color: rgb(255 255 255 / var(--tw-text-opacity)); +} + +:is(.dark .dark\:text-yellow-300) { + 
--tw-text-opacity: 1; + color: rgb(250 202 21 / var(--tw-text-opacity)); +} + +:is(.dark .dark\:placeholder-gray-400)::-moz-placeholder { + --tw-placeholder-opacity: 1; + color: rgb(156 163 175 / var(--tw-placeholder-opacity)); +} + +:is(.dark .dark\:placeholder-gray-400)::placeholder { + --tw-placeholder-opacity: 1; + color: rgb(156 163 175 / var(--tw-placeholder-opacity)); +} + +:is(.dark .dark\:ring-offset-gray-800) { + --tw-ring-offset-color: #1F2937; +} + +:is(.dark .dark\:hover\:bg-blue-700:hover) { + --tw-bg-opacity: 1; + background-color: rgb(26 86 219 / var(--tw-bg-opacity)); +} + +:is(.dark .dark\:hover\:bg-gray-600:hover) { + --tw-bg-opacity: 1; + background-color: rgb(75 85 99 / var(--tw-bg-opacity)); +} + +:is(.dark .dark\:hover\:bg-gray-700:hover) { + --tw-bg-opacity: 1; + background-color: rgb(55 65 81 / var(--tw-bg-opacity)); +} + +:is(.dark .dark\:hover\:bg-green-700:hover) { + --tw-bg-opacity: 1; + background-color: rgb(4 108 78 / var(--tw-bg-opacity)); +} + +:is(.dark .dark\:hover\:bg-red-700:hover) { + --tw-bg-opacity: 1; + background-color: rgb(200 30 30 / var(--tw-bg-opacity)); +} + +:is(.dark .dark\:hover\:text-gray-300:hover) { + --tw-text-opacity: 1; + color: rgb(209 213 219 / var(--tw-text-opacity)); +} + +:is(.dark .dark\:hover\:text-white:hover) { + --tw-text-opacity: 1; + color: rgb(255 255 255 / var(--tw-text-opacity)); +} + +:is(.dark .dark\:focus\:border-blue-500:focus) { + --tw-border-opacity: 1; + border-color: rgb(63 131 248 / var(--tw-border-opacity)); +} + +:is(.dark .dark\:focus\:bg-gray-700:focus) { + --tw-bg-opacity: 1; + background-color: rgb(55 65 81 / var(--tw-bg-opacity)); +} + +:is(.dark .dark\:focus\:ring-blue-500:focus) { + --tw-ring-opacity: 1; + --tw-ring-color: rgb(63 131 248 / var(--tw-ring-opacity)); +} + +:is(.dark .dark\:focus\:ring-blue-600:focus) { + --tw-ring-opacity: 1; + --tw-ring-color: rgb(28 100 242 / var(--tw-ring-opacity)); +} + +:is(.dark .dark\:focus\:ring-blue-800:focus) { + --tw-ring-opacity: 1; + 
--tw-ring-color: rgb(30 66 159 / var(--tw-ring-opacity)); +} + +:is(.dark .dark\:focus\:ring-gray-600:focus) { + --tw-ring-opacity: 1; + --tw-ring-color: rgb(75 85 99 / var(--tw-ring-opacity)); +} + +:is(.dark .dark\:focus\:ring-gray-700:focus) { + --tw-ring-opacity: 1; + --tw-ring-color: rgb(55 65 81 / var(--tw-ring-opacity)); +} + +:is(.dark .dark\:focus\:ring-green-800:focus) { + --tw-ring-opacity: 1; + --tw-ring-color: rgb(3 84 63 / var(--tw-ring-opacity)); +} + +:is(.dark .dark\:focus\:ring-red-800:focus) { + --tw-ring-opacity: 1; + --tw-ring-color: rgb(155 28 28 / var(--tw-ring-opacity)); +} + +:is(.dark .peer:focus ~ .dark\:peer-focus\:ring-blue-800) { + --tw-ring-opacity: 1; + --tw-ring-color: rgb(30 66 159 / var(--tw-ring-opacity)); +} + +@media (min-width: 640px) { + .sm\:flex { + display: flex; + } + + .sm\:max-w-md { + max-width: 28rem; + } + + .sm\:translate-x-0 { + --tw-translate-x: 0px; + transform: translate(var(--tw-translate-x), var(--tw-translate-y)) rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y)); + } + + .sm\:space-x-4 > :not([hidden]) ~ :not([hidden]) { + --tw-space-x-reverse: 0; + margin-right: calc(1rem * var(--tw-space-x-reverse)); + margin-left: calc(1rem * calc(1 - var(--tw-space-x-reverse))); + } + + .sm\:space-y-0 > :not([hidden]) ~ :not([hidden]) { + --tw-space-y-reverse: 0; + margin-top: calc(0px * calc(1 - var(--tw-space-y-reverse))); + margin-bottom: calc(0px * var(--tw-space-y-reverse)); + } +} + +@media (min-width: 768px) { + .md\:mt-0 { + margin-top: 0px; + } + + .md\:hidden { + display: none; + } + + .md\:h-screen { + height: 100vh; + } + + .md\:w-1\/2 { + width: 50%; + } + + .md\:w-1\/3 { + width: 33.333333%; + } + + .md\:w-1\/6 { + width: 16.666667%; + } + + .md\:flex-row { + flex-direction: row; + } + + .md\:space-x-4 > :not([hidden]) ~ :not([hidden]) { + --tw-space-x-reverse: 0; + margin-right: calc(1rem * var(--tw-space-x-reverse)); + margin-left: 
calc(1rem * calc(1 - var(--tw-space-x-reverse))); + } + + .md\:space-y-0 > :not([hidden]) ~ :not([hidden]) { + --tw-space-y-reverse: 0; + margin-top: calc(0px * calc(1 - var(--tw-space-y-reverse))); + margin-bottom: calc(0px * var(--tw-space-y-reverse)); + } + + .md\:p-5 { + padding: 1.25rem; + } +} + +@media (min-width: 1024px) { + .lg\:w-1\/3 { + width: 33.333333%; + } + + .lg\:flex-row { + flex-direction: row; + } + + .lg\:justify-between { + justify-content: space-between; + } + + .lg\:space-y-0 > :not([hidden]) ~ :not([hidden]) { + --tw-space-y-reverse: 0; + margin-top: calc(0px * calc(1 - var(--tw-space-y-reverse))); + margin-bottom: calc(0px * var(--tw-space-y-reverse)); + } + + .lg\:object-left { + -o-object-position: left; + object-position: left; + } + + .lg\:py-0 { + padding-top: 0px; + padding-bottom: 0px; + } +} + +@media (min-width: 1280px) { + .xl\:p-0 { + padding: 0px; + } +} + +.rtl\:rotate-180:where([dir="rtl"], [dir="rtl"] *) { + --tw-rotate: 180deg; + transform: translate(var(--tw-translate-x), var(--tw-translate-y)) rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y)); +} + +.rtl\:space-x-reverse:where([dir="rtl"], [dir="rtl"] *) > :not([hidden]) ~ :not([hidden]) { + --tw-space-x-reverse: 1; +} + +.peer:checked ~ .rtl\:peer-checked\:after\:-translate-x-full:where([dir="rtl"], [dir="rtl"] *)::after { + content: var(--tw-content); + --tw-translate-x: -100%; + transform: translate(var(--tw-translate-x), var(--tw-translate-y)) rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y)); +} \ No newline at end of file diff --git a/crates/dashboard/src/account.rs b/crates/dashboard/src/account.rs new file mode 100644 index 0000000..13b42e1 --- /dev/null +++ b/crates/dashboard/src/account.rs @@ -0,0 +1,294 @@ +use anyhow::Result; +use gloo_net::http::Request; +use lapdev_common::{ + console::{MeUser, 
NewSessionResponse}, + AuthProvider, +}; +use leptos::prelude::*; +use leptos_router::hooks::use_params_map; + +use crate::{ + cluster::{get_cluster_info, OauthSettings}, + component::badge::{Badge, BadgeVariant}, + modal::ErrorResponse, +}; + +pub async fn get_login() -> Result { + let resp: MeUser = Request::get("/api/private/me").send().await?.json().await?; + Ok(resp) +} + +async fn now_login(provider: AuthProvider) -> Result<()> { + let location = window().window().location(); + let next = location.href().unwrap_or_default(); + let next = urlencoding::encode(&next).to_string(); + let resp = Request::get("/api/private/session") + .query([ + ("provider", &provider.to_string()), + ("next", &next), + ("host", &location.origin().unwrap_or_default()), + ]) + .send() + .await?; + let resp: NewSessionResponse = resp.json().await?; + let _ = window().location().set_href(&resp.url); + + Ok(()) +} + +async fn now_logout() { + let _ = Request::delete("/api/private/session").send().await; + let _ = window().location().set_href("/"); +} + +#[component] +pub fn Login() -> impl IntoView { + let cluster_info = get_cluster_info(); + + view! { +
+
+
+
+ + + + + + + Lapdev + +
+

+ Spin up production grade Kubernetes environments in minutes. +

+

+ Lapdev mirrors your live manifests, keeps every dev space in sync, and lets teams ship without wrestling YAML or waiting on pipelines. +

+
+
+ + + + + One click replicas of prod workloads, config, and secrets. + + + + + + Devbox intercepts traffic for true local debugging. + + + + + + Instant preview URLs with SSO gated sharing. + +
+
+
+
+
+ {move || match cluster_info.with(|i| i.as_ref().map(|i| i.auth_providers.clone())) { + Some(auth_providers) if !auth_providers.is_empty() => view! { }.into_any(), + Some(_) => view! { +
+

+ Configure your identity provider +

+

+ Connect GitHub or GitLab to unlock Lapdev SSO for your organization. +

+
+ +
+
+ }.into_any(), + None => view! { +
+
+
+
+
+
+

+ Loading cluster SSO providers... +

+
+ }.into_any(), + }} +
+
+
+
+ } +} + +#[component] +pub fn LoginWithView(auth_providers: Vec) -> impl IntoView { + let login = use_context::>>().unwrap(); + view! { +
+
+
+

+ Sign in to Lapdev + + "Public beta" + +

+

+ Choose your identity provider to launch your personal or branch workspace. +

+
+ + + +
+
+
+
+ } +} + +#[component] +pub fn InitAuthProvidersView() -> impl IntoView { + view! { + + } +} + +#[component] +pub fn NavUserControl(user_control_hidden: RwSignal) -> impl IntoView { + let login = use_context::>>().unwrap(); + + let logout = move |_| { + user_control_hidden.set(true); + Action::new_local(move |_| now_logout()).dispatch(()); + }; + + view! { +
+
+ + { move || login.get().flatten().and_then(|l| l.name.clone()).unwrap_or("".to_string()) } + + + { move || login.get().flatten().and_then(|l| l.email.clone()).unwrap_or("".to_string()) } + +
+ + +
+ } +} + +#[component] +pub fn AccountSettings() -> impl IntoView { + view! { +
+
+ User Settings +
+

{"Manage your account settings"}

+
+ } +} + +async fn join_org(invitation_id: String) -> Result> { + let resp = Request::put(&format!("/api/v1/join/{invitation_id}")) + .send() + .await?; + if resp.status() != 204 { + let error = resp + .json::() + .await + .unwrap_or_else(|_| ErrorResponse { + error: "Internal Server Error".to_string(), + }); + return Ok(Some(error)); + } + let _ = window().location().set_href("/"); + Ok(None) +} + +#[component] +pub fn JoinView() -> impl IntoView { + let params = use_params_map(); + let id = Signal::derive(move || params.with(|params| params.get("id")).unwrap_or_default()); + + let result = { LocalResource::new(move || async move { join_org(id.get_untracked()).await }) }; + + view! { + { + move || if let Some(error) = result.with(|r| r.as_ref().and_then(|r| r.as_ref().ok().cloned())).flatten() { + view! { +
+ { error.error } +
+ }.into_any() + } else { + ().into_any() + } + } + } +} diff --git a/crates/dashboard/src/app.rs b/crates/dashboard/src/app.rs new file mode 100644 index 0000000..bb6b14b --- /dev/null +++ b/crates/dashboard/src/app.rs @@ -0,0 +1,247 @@ +use std::sync::Arc; + +use anyhow::Result; +use gloo_net::http::Request; +use lapdev_api_hrpc::HrpcServiceClient; +use lapdev_common::{console::MeUser, ClusterInfo}; +use leptos::prelude::*; +use leptos_router::{ + components::{Route, Router, Routes}, + path, +}; + +use crate::{ + account::{get_login, AccountSettings, JoinView, Login}, + audit_log::AuditLogView, + cli_auth::CliAuth, + cli_success::CliSuccess, + cluster::{ClusterSettings, ClusterUsersView, MachineTypeView, WorkspaceHostView}, + component::sidebar::SidebarData, + git_provider::GitProviderView, + home::DashboardHome, + kube_app_catalog::KubeAppCatalog, + kube_app_catalog_detail::KubeAppCatalogDetail, + kube_app_catalog_workload::KubeAppCatalogWorkload, + kube_cluster::KubeCluster, + kube_environment::{EnvironmentType, KubeEnvironment}, + kube_environment_detail::KubeEnvironmentDetail, + kube_environment_workload::KubeEnvironmentWorkload, + kube_resource::KubeResource, + license::{LicenseView, SignLicenseView}, + nav::{AdminSideNav, NavExpanded, SideNav, TopNav}, + organization::{NewOrgModal, OrgMembers, OrgSettings}, + project::{ProjectDetails, Projects}, + quota::QuotaView, + ssh_key::SshKeys, + usage::UsageView, + workspace::{WorkspaceDetails, Workspaces}, +}; + +#[derive(Clone)] +pub struct AppConfig { + pub show_lapdev_website: RwSignal, + pub environment_type: RwSignal, + pub current_page: RwSignal, + pub header_links: RwSignal>, +} + +#[component] +fn Root() -> impl IntoView { + view! 
{ + + } +} + +pub fn get_hrpc_client() -> HrpcServiceClient { + HrpcServiceClient::new("/api/rpc".to_string()) +} + +async fn get_cluster_info() -> Result { + let resp = Request::get("/api/v1/cluster_info").send().await?; + let info: ClusterInfo = resp.json().await?; + Ok(info) +} + +pub fn set_context() { + let login_counter = RwSignal::new_local(0); + provide_context(login_counter); + + let login_fetching = RwSignal::new_local(true); + provide_context(login_fetching); + + let login_fetching_for_resource = login_fetching; + let login = LocalResource::new(move || { + login_fetching_for_resource.set(true); + async move { + let result = get_login().await.ok(); + login_fetching_for_resource.set(false); + result + } + }); + Effect::new(move |_| { + login_counter.track(); + login_fetching.set(true); + login.refetch(); + }); + provide_context(login); + + let cluster_info = LocalResource::new(|| async move { get_cluster_info().await.ok() }); + let cluster_info = Signal::derive_local(move || cluster_info.get().flatten()); + provide_context(cluster_info); + + let current_org = Signal::derive_local(move || { + let login = login.get().flatten(); + login.map(|u| u.organization) + }); + provide_context(current_org); + + let pathname = window().location().pathname().unwrap_or_default(); + let nav_expanded = NavExpanded { + orgnization: RwSignal::new(pathname.starts_with("/organization")), + account: RwSignal::new(pathname.starts_with("/account")), + k8s_environments: RwSignal::new(pathname.starts_with("/kubernetes/environments")), + }; + provide_context(nav_expanded); + + provide_context(AppConfig { + show_lapdev_website: RwSignal::new(false), + environment_type: RwSignal::new(EnvironmentType::Personal), + current_page: RwSignal::new(String::new()), + header_links: RwSignal::new(Vec::new()), + }); + + provide_context(SidebarData { + open: RwSignal::new(true), + }); + + provide_context(Arc::new(HrpcServiceClient::new("/api/rpc".to_string()))); +} + +#[component] +pub fn App() -> 
impl IntoView { + set_context(); + view! { + + + } /> + } /> + } /> + } /> + } /> + } /> + } /> + } /> + } /> + } /> + } /> + } /> + } /> + } /> + } /> + } /> + } /> + } /> + } /> + } /> + } /> + } /> + } /> + } /> + } /> + } /> + } /> + } /> + } /> + } /> + } /> + } /> + } /> + + + + } +} + +#[component] +pub fn WrappedView(element: T) -> impl IntoView +where + T: IntoView + Copy + 'static + Send + Sync, +{ + let login = use_context::>>().unwrap(); + let login_fetching = expect_context::>(); + let new_org_modal_hidden = RwSignal::new_local(true); + view! { + } + > + + + } + } + > +
+ +
+ +
+ {element} +
+
+ +
+ + } +} + +#[component] +pub fn AdminWrappedView(element: T) -> impl IntoView +where + T: IntoView + Copy + 'static + Send + Sync, +{ + let login = use_context::>>().unwrap(); + let login_fetching = expect_context::>(); + view! { + } + > + + + } + } + > +
+ +
+ +
+ {element} +
+
+
+ + } +} + +#[component] +fn AppLoading() -> impl IntoView { + view! { +
+
+ + Loading your workspace... +
+
+ } +} diff --git a/lapdev-dashboard/src/audit_log.rs b/crates/dashboard/src/audit_log.rs similarity index 87% rename from lapdev-dashboard/src/audit_log.rs rename to crates/dashboard/src/audit_log.rs index 8c489a5..ca40849 100644 --- a/lapdev-dashboard/src/audit_log.rs +++ b/crates/dashboard/src/audit_log.rs @@ -2,23 +2,23 @@ use anyhow::{anyhow, Result}; use chrono::{DateTime, FixedOffset, Local, NaiveDate, TimeZone}; use gloo_net::http::Request; use lapdev_common::{console::Organization, AuditLogRecord, AuditLogResult}; -use leptos::{ - component, create_action, create_rw_signal, event_target_value, use_context, view, For, - IntoView, Signal, SignalGet, SignalGetUntracked, SignalSet, SignalUpdate, SignalWith, - SignalWithUntracked, -}; +use leptos::prelude::*; use crate::{ + component::button::Button, datepicker::Datepicker, modal::{DatetimeModal, ErrorResponse}, + organization::get_current_org, }; async fn get_audit_logs( + org: Signal>, start: Option, end: Option, page_size: String, page: u64, ) -> Result { + let org = org.get().ok_or_else(|| anyhow!("can't get org"))?; let start: DateTime = start .and_then(|t| { Local @@ -44,12 +44,6 @@ async fn get_audit_logs( }) .into(); - let org = - use_context::>>().ok_or_else(|| anyhow!("can't get org"))?; - let org = org - .get_untracked() - .ok_or_else(|| anyhow!("can't get org"))?; - let page_size = page_size.parse::().unwrap_or(10); let resp = Request::get(&format!("/api/v1/organizations/{}/audit_logs", org.id)) @@ -79,16 +73,18 @@ async fn get_audit_logs( #[component] pub fn AuditLogView() -> impl IntoView { - let from_date = create_rw_signal(Some(Local::now().date_naive())); - let to_date = create_rw_signal(Some(Local::now().date_naive())); - let page_size = create_rw_signal(String::new()); - let page = create_rw_signal(0); + let from_date = RwSignal::new(Some(Local::now().date_naive())); + let to_date = RwSignal::new(Some(Local::now().date_naive())); + let page_size = RwSignal::new(String::new()); + let page = 
RwSignal::new(0); + let org = get_current_org(); - let error = create_rw_signal(None); + let error = RwSignal::new(None); - let get_action = create_action(move |()| async move { + let get_action = Action::new_local(move |()| async move { error.set(None); let result = get_audit_logs( + org, from_date.get_untracked(), to_date.get_untracked(), page_size.get_untracked(), @@ -155,22 +151,21 @@ pub fn AuditLogView() -> impl IntoView { to - + { move || if let Some(error) = error.get() { view! {
{ error }
- }.into_view() + }.into_any() } else { - view!{}.into_view() + ().into_any() }}
@@ -200,7 +195,7 @@ pub fn AuditLogView() -> impl IntoView { class=("text-gray-300", move || audit_logs.with(|a| a.page == 0)) class=("cursor-pointer", move || !audit_logs.with(|a| a.page == 0)) class=("hover:bg-gray-100", move || !audit_logs.with(|a| a.page == 0)) - disabled=move || audit_logs.with(|a| a.page == 0) + class=("disabled", move || audit_logs.with(|a| a.page == 0)) on:click=prev_page >
+
+
+ + + + + + Lapdev +
+ + + + +
+

"Error"

+

{move || error.get().unwrap_or_default()}

+ } + } + > +
+ + + +
+

+ "Authenticating CLI..." +

+

+ "Generating your authentication token." +

+

+ "Please wait, redirecting to success page..." +

+ +
+
+ } +} + +async fn generate_token(session_id: Uuid, device_name: String) -> Result<(), String> { + let req = GenerateTokenRequest { + session_id, + device_name, + }; + + let _response: GenerateTokenResponse = Request::post("/api/v1/auth/cli/generate-token") + .json(&req) + .map_err(|e| format!("Failed to serialize request: {}", e))? + .send() + .await + .map_err(|e| format!("Failed to send request: {}", e))? + .json() + .await + .map_err(|e| format!("Failed to parse response: {}", e))?; + + Ok(()) +} diff --git a/crates/dashboard/src/cli_success.rs b/crates/dashboard/src/cli_success.rs new file mode 100644 index 0000000..d77dcc4 --- /dev/null +++ b/crates/dashboard/src/cli_success.rs @@ -0,0 +1,40 @@ +use leptos::prelude::*; + +#[component] +pub fn CliSuccess() -> impl IntoView { + view! { +
+
+
+ + + + + + Lapdev +
+
+ + + +
+

+ "CLI Authenticated Successfully" +

+

+ "You can close this window and return to your terminal." +

+
+
+ } +} diff --git a/lapdev-dashboard/src/cluster.rs b/crates/dashboard/src/cluster.rs similarity index 76% rename from lapdev-dashboard/src/cluster.rs rename to crates/dashboard/src/cluster.rs index b998adf..9308417 100644 --- a/lapdev-dashboard/src/cluster.rs +++ b/crates/dashboard/src/cluster.rs @@ -7,18 +7,14 @@ use lapdev_common::{ NewWorkspaceHost, OauthSettings, UpdateClusterUser, UpdateMachineType, UpdateWorkspaceHost, WorkspaceHost, }; -use leptos::{ - component, create_action, create_effect, create_local_resource, create_rw_signal, document, - event_target_checked, event_target_value, expect_context, leptos_dom::helpers::location, - set_timeout, view, Action, For, IntoView, RwSignal, Signal, SignalGet, SignalGetUntracked, - SignalSet, SignalUpdate, SignalWith, SignalWithUntracked, -}; +use leptos::prelude::*; use uuid::Uuid; use wasm_bindgen::{JsCast, UnwrapThrowExt}; use web_sys::FocusEvent; -use crate::modal::{ - CreationInput, CreationModal, DatetimeModal, DeletionModal, ErrorResponse, SettingView, +use crate::{ + component::input::Input, + modal::{CreationInput, DatetimeModal, DeleteModal, ErrorResponse, Modal, SettingView}, }; async fn get_all_workspaces() -> Result> { @@ -27,15 +23,20 @@ async fn get_all_workspaces() -> Result> { Ok(hosts) } +pub fn get_cluster_info() -> Signal, LocalStorage> { + expect_context::, LocalStorage>>() +} + #[component] pub fn WorkspaceHostView() -> impl IntoView { - let new_workspace_host_modal_hidden = create_rw_signal(true); - let update_counter = create_rw_signal(0); - let hosts = create_local_resource( - move || update_counter.get(), - move |_| async move { get_all_workspaces().await }, - ); - let host_filter = create_rw_signal(String::new()); + let new_workspace_host_modal_open = RwSignal::new(false); + let update_counter = RwSignal::new_local(0); + let hosts = LocalResource::new(move || async move { get_all_workspaces().await }); + Effect::new(move |_| { + update_counter.track(); + hosts.refetch(); + }); + let 
host_filter = RwSignal::new_local(String::new()); let hosts = Signal::derive(move || { let host_filter = host_filter.get(); let mut hosts = hosts.with(|hosts| { @@ -77,7 +78,7 @@ pub fn WorkspaceHostView() -> impl IntoView { @@ -95,23 +96,23 @@ pub fn WorkspaceHostView() -> impl IntoView { } /> - + } } #[component] pub fn NewWorkspaceHostView( - new_workspace_host_modal_hidden: RwSignal, - update_counter: RwSignal, + new_workspace_host_modal_open: RwSignal, + update_counter: RwSignal, ) -> impl IntoView { - let cluster_info = expect_context::>>(); - let host = create_rw_signal(String::new()); - let region = create_rw_signal(String::new()); - let zone = create_rw_signal(String::new()); - let cpu = create_rw_signal(String::new()); - let memory = create_rw_signal(String::new()); - let disk = create_rw_signal(String::new()); - let action = create_action(move |_| { + let cluster_info = get_cluster_info(); + let host = RwSignal::new_local(String::new()); + let region = RwSignal::new_local(String::new()); + let zone = RwSignal::new_local(String::new()); + let cpu = RwSignal::new_local(String::new()); + let memory = RwSignal::new_local(String::new()); + let disk = RwSignal::new_local(String::new()); + let action = Action::new_local(move |_| { create_workspace_host( host.get_untracked(), region.get_untracked(), @@ -120,43 +121,46 @@ pub fn NewWorkspaceHostView( memory.get_untracked(), disk.get_untracked(), update_counter, - new_workspace_host_modal_hidden, + new_workspace_host_modal_open, ) }); - let body = view! { - -
- -
-
- -
- - - - }; view! { - + + +
+ +
+
+ +
+ + + +
} } #[component] pub fn UpdateWorkspaceHostModal( host: WorkspaceHost, - update_workspace_host_modal_hidden: RwSignal, - update_counter: RwSignal, + update_workspace_host_modal_open: RwSignal, + update_counter: RwSignal, ) -> impl IntoView { - let cluster_info = expect_context::>>(); - let region = create_rw_signal(host.region.clone()); - let zone = create_rw_signal(host.zone.clone()); - let cpu = create_rw_signal(host.cpu.to_string()); - let memory = create_rw_signal(host.memory.to_string()); - let disk = create_rw_signal(host.disk.to_string()); - let action = create_action(move |_| { + let cluster_info = get_cluster_info(); + let region = RwSignal::new_local(host.region.clone()); + let zone = RwSignal::new_local(host.zone.clone()); + let cpu = RwSignal::new_local(host.cpu.to_string()); + let memory = RwSignal::new_local(host.memory.to_string()); + let disk = RwSignal::new_local(host.disk.to_string()); + let action = Action::new_local(move |_| { update_workspace_host( host.id, region.get_untracked(), @@ -165,26 +169,31 @@ pub fn UpdateWorkspaceHostModal( memory.get_untracked(), disk.get_untracked(), update_counter, - update_workspace_host_modal_hidden, + update_workspace_host_modal_open, ) }); - let body = view! { -
- -
-
- -
- - - - }; view! { - + +
+ +
+
+ +
+ + + +
} } @@ -196,8 +205,8 @@ async fn create_workspace_host( cpu: String, memory: String, disk: String, - update_counter: RwSignal, - new_workspace_host_modal_hidden: RwSignal, + update_counter: RwSignal, + new_workspace_host_modal_open: RwSignal, ) -> Result<(), ErrorResponse> { let cpu: usize = match cpu.parse() { Ok(n) => n, @@ -244,7 +253,7 @@ async fn create_workspace_host( return Err(error); } update_counter.update(|c| *c += 1); - new_workspace_host_modal_hidden.set(true); + new_workspace_host_modal_open.set(false); Ok(()) } @@ -256,8 +265,8 @@ async fn update_workspace_host( cpu: String, memory: String, disk: String, - update_counter: RwSignal, - update_workspace_host_modal_hidden: RwSignal, + update_counter: RwSignal, + update_workspace_host_modal_open: RwSignal, ) -> Result<(), ErrorResponse> { let cpu: usize = match cpu.parse() { Ok(n) => n, @@ -304,14 +313,14 @@ async fn update_workspace_host( return Err(error); } update_counter.update(|c| *c += 1); - update_workspace_host_modal_hidden.set(true); + update_workspace_host_modal_open.set(false); Ok(()) } async fn delete_workspace_host( id: Uuid, - update_counter: RwSignal, - delete_modal_hidden: RwSignal, + update_counter: RwSignal, + delete_modal_open: RwSignal, ) -> Result<(), ErrorResponse> { let resp = Request::delete(&format!("/api/v1/admin/workspace_hosts/{id}")) .send() @@ -326,19 +335,22 @@ async fn delete_workspace_host( return Err(error); } update_counter.update(|c| *c += 1); - delete_modal_hidden.set(true); + delete_modal_open.set(false); Ok(()) } #[component] -fn WorkspaceHostItem(host: WorkspaceHost, update_counter: RwSignal) -> impl IntoView { - let cluster_info = expect_context::>>(); - let update_workspace_host_modal_hidden = create_rw_signal(true); - let delete_modal_hidden = create_rw_signal(true); +fn WorkspaceHostItem( + host: WorkspaceHost, + update_counter: RwSignal, +) -> impl IntoView { + let cluster_info = get_cluster_info(); + let update_workspace_host_modal_open = RwSignal::new(false); 
+ let delete_modal_open = RwSignal::new(false); let delete_action = { let id = host.id; - create_action(move |_| async move { - delete_workspace_host(id, update_counter, delete_modal_hidden).await + Action::new_local(move |_| async move { + delete_workspace_host(id, update_counter, delete_modal_open).await }) }; view! { @@ -365,22 +377,22 @@ fn WorkspaceHostItem(host: WorkspaceHost, update_counter: RwSignal) -> impl

{"Disk:"}{format!("{}GB/{}GB", host.disk - host.available_disk, host.disk)}

- +
- - + + } } #[component] fn WorkspaceHostControl( - delete_modal_hidden: RwSignal, - update_workspace_host_modal_hidden: RwSignal, + delete_modal_open: RwSignal, + update_workspace_host_modal_open: RwSignal, align_right: bool, ) -> impl IntoView { - let dropdown_hidden = create_rw_signal(true); + let dropdown_hidden = RwSignal::new_local(true); let toggle_dropdown = move |_| { if dropdown_hidden.get_untracked() { @@ -415,7 +427,7 @@ fn WorkspaceHostControl( let delete_workspace_host = { move |_| { dropdown_hidden.set(true); - delete_modal_hidden.set(false); + delete_modal_open.set(true); } }; @@ -445,7 +457,7 @@ fn WorkspaceHostControl( class="block px-4 py-2 hover:bg-gray-100" on:click=move |_| { dropdown_hidden.set(true); - update_workspace_host_modal_hidden.set(false); + update_workspace_host_modal_open.set(true); } > Update @@ -468,7 +480,7 @@ fn WorkspaceHostControl( #[component] pub fn ClusterSettings() -> impl IntoView { - let cluster_info = expect_context::>>(); + let cluster_info = get_cluster_info(); view! {
@@ -535,13 +547,14 @@ async fn get_cpu_overcommit() -> Result { #[component] fn CpuOvercommitSetting() -> impl IntoView { - let update_counter = create_rw_signal(0); - let current_value = create_local_resource( - move || update_counter.get(), - move |_| async move { get_cpu_overcommit().await }, - ); - let value = create_rw_signal(String::new()); - create_effect(move |_| { + let update_counter = RwSignal::new_local(0); + let current_value = LocalResource::new(move || async move { get_cpu_overcommit().await }); + Effect::new(move |_| { + update_counter.get(); + current_value.refetch(); + }); + let value = RwSignal::new_local(String::new()); + Effect::new(move |_| { let v = current_value.with(|v| v.as_ref().and_then(|v| v.as_ref().ok().copied())); if let Some(v) = v { value.set(v.to_string()); @@ -549,17 +562,19 @@ fn CpuOvercommitSetting() -> impl IntoView { }); let body = view! {
-
}; let save_action = - create_action(move |_| async move { update_cpu_overcommit(value.get_untracked()).await }); + Action::new_local( + move |_| async move { update_cpu_overcommit(value.get_untracked()).await }, + ); view! { } @@ -567,7 +582,7 @@ fn CpuOvercommitSetting() -> impl IntoView { async fn update_oauth2( update_oauth: OauthSettings, - update_counter: RwSignal, + update_counter: RwSignal, reload: bool, ) -> Result<(), ErrorResponse> { let resp = Request::put("/api/v1/admin/oauth") @@ -602,18 +617,19 @@ async fn get_oauth2() -> Result { #[component] pub fn OauthSettings(reload: bool) -> impl IntoView { - let update_counter = create_rw_signal(0); - let github_client_id = create_rw_signal(String::new()); - let github_client_secret = create_rw_signal(String::new()); - let gitlab_client_id = create_rw_signal(String::new()); - let gitlab_client_secret = create_rw_signal(String::new()); - - let oauth = create_local_resource( - move || update_counter.get(), - move |_| async move { get_oauth2().await }, - ); - - create_effect(move |_| { + let update_counter = RwSignal::new_local(0); + let github_client_id = RwSignal::new_local(String::new()); + let github_client_secret = RwSignal::new_local(String::new()); + let gitlab_client_id = RwSignal::new_local(String::new()); + let gitlab_client_secret = RwSignal::new_local(String::new()); + + let oauth = LocalResource::new(move || async move { get_oauth2().await }); + Effect::new(move |_| { + update_counter.track(); + oauth.refetch(); + }); + + Effect::new(move |_| { let oauth = oauth.with(|oauth| oauth.as_ref().and_then(|o| o.as_ref().ok().cloned())); if let Some(oauth) = oauth { github_client_id.set(oauth.github_client_id); @@ -631,10 +647,10 @@ pub fn OauthSettings(reload: bool) -> impl IntoView { GitHub Client Id
-
@@ -644,10 +660,10 @@ pub fn OauthSettings(reload: bool) -> impl IntoView { GitHub Client Secret
-
@@ -655,10 +671,10 @@ pub fn OauthSettings(reload: bool) -> impl IntoView { GitLab Client Id
-
@@ -666,14 +682,14 @@ pub fn OauthSettings(reload: bool) -> impl IntoView { GitLab Client Secret
- }; - let save_action = create_action(move |_| async move { + let save_action = Action::new_local(move |_| async move { update_oauth2( OauthSettings { github_client_id: github_client_id.get_untracked(), @@ -699,7 +715,7 @@ async fn get_hostnames() -> Result> { async fn update_hostnames( value: Vec<(String, String)>, - update_counter: RwSignal, + update_counter: RwSignal, ) -> Result<(), ErrorResponse> { let resp = Request::put("/api/v1/admin/hostnames") .json(&value)? @@ -720,22 +736,23 @@ async fn update_hostnames( #[component] pub fn ClusterHostnameSetting() -> impl IntoView { - let cluster_info = expect_context::>>(); + let cluster_info = get_cluster_info(); - let update_counter = create_rw_signal(0); + let update_counter = RwSignal::new_local(0); - let set_hostnames = create_rw_signal(vec![]); + let set_hostnames = RwSignal::new_local(vec![]); - let hostnames = create_local_resource( - move || update_counter.get(), - |_| async move { get_hostnames().await.unwrap_or_default() }, - ); + let hostnames = LocalResource::new(|| async move { get_hostnames().await.unwrap_or_default() }); + Effect::new(move |_| { + update_counter.track(); + hostnames.refetch(); + }); - create_effect(move |_| { - let hostnames = hostnames.with(|h| h.clone()).unwrap_or_default(); - let mut hostnames: Vec<(String, RwSignal)> = hostnames + Effect::new(move |_| { + let hostnames = hostnames.get().unwrap_or_default(); + let mut hostnames: Vec<(String, RwSignal)> = hostnames .into_iter() - .map(|(key, value)| (key, create_rw_signal(value))) + .map(|(key, value)| (key, RwSignal::new_local(value))) .collect(); hostnames.sort_by_key(|(region, _)| region.to_owned()); set_hostnames.set(hostnames); @@ -755,36 +772,36 @@ pub fn ClusterHostnameSetting() -> impl IntoView {
{format!("Region: {region}")}
- } } /> - }.into_view() + }.into_any() } else { view! {
-
- }.into_view() + }.into_any() } } } }; - let save_action = create_action(move |_| async move { + let save_action = Action::new_local(move |_| async move { let value = set_hostnames .get_untracked() .into_iter() @@ -834,12 +851,12 @@ async fn update_certs(certs: Vec<(String, String)>) -> Result<(), ErrorResponse> #[component] pub fn ClusterCertsSetting() -> impl IntoView { - let certs = create_rw_signal(vec![]); + let certs = RwSignal::new_local(vec![]); let new_cert = move |_| { certs.update(|certs| { certs.push(( - create_rw_signal(String::new()), - create_rw_signal(String::new()), + RwSignal::new_local(String::new()), + RwSignal::new_local(String::new()), )); }); }; @@ -849,22 +866,25 @@ pub fn ClusterCertsSetting() -> impl IntoView { }) }; - let update_counter = create_rw_signal(0); - let current_certs = create_local_resource( - move || update_counter.get(), - move |_| async move { get_certs().await.unwrap_or_default() }, - ); - create_effect(move |_| { - let current_certs = current_certs.with(|certs| certs.clone().unwrap_or_default()); + let update_counter = RwSignal::new_local(0); + let current_certs = + LocalResource::new(move || async move { get_certs().await.unwrap_or_default() }); + Effect::new(move |_| { + update_counter.track(); + current_certs.refetch(); + }); + + Effect::new(move |_| { + let current_certs = current_certs.get().unwrap_or_default(); certs.update(|certs| { *certs = current_certs .into_iter() - .map(|(name, value)| (create_rw_signal(name), create_rw_signal(value))) + .map(|(name, value)| (RwSignal::new_local(name), RwSignal::new_local(value))) .collect(); }); }); - let save_action = create_action(move |_| async move { + let save_action = Action::new_local(move |_| async move { let certs = certs.get_untracked(); let certs: Vec<(String, String)> = certs .into_iter() @@ -931,7 +951,7 @@ pub fn ClusterCertsSetting() -> impl IntoView { > New Certificate - }.into_view(); + }.into_any(); view! 
{ @@ -946,17 +966,18 @@ async fn all_machine_types() -> Result> { #[component] pub fn MachineTypeView() -> impl IntoView { - let new_machine_type_modal_hidden = create_rw_signal(true); - let update_counter = create_rw_signal(0); - let machine_types = create_local_resource( - move || update_counter.get(), - |_| async move { all_machine_types().await.unwrap_or_default() }, - ); - let machine_type_filter = create_rw_signal(String::new()); + let new_machine_type_modal_open = RwSignal::new(false); + let update_counter = RwSignal::new_local(0); + let machine_types = + LocalResource::new(|| async move { all_machine_types().await.unwrap_or_default() }); + Effect::new(move |_| { + update_counter.track(); + machine_types.refetch(); + }); + let machine_type_filter = RwSignal::new_local(String::new()); let machine_types = Signal::derive(move || { let machine_type_filter = machine_type_filter.get(); - let mut machine_types = - machine_types.with(|machine_types| machine_types.clone().unwrap_or_default()); + let mut machine_types = machine_types.get().unwrap_or_default(); machine_types.retain(|h| h.name.contains(&machine_type_filter)); machine_types }); @@ -989,7 +1010,7 @@ pub fn MachineTypeView() -> impl IntoView { @@ -1007,18 +1028,21 @@ pub fn MachineTypeView() -> impl IntoView { } /> - + } } #[component] -fn MachineTypeItem(machine_type: MachineType, update_counter: RwSignal) -> impl IntoView { - let update_machine_type_modal_hidden = create_rw_signal(true); - let delete_modal_hidden = create_rw_signal(true); +fn MachineTypeItem( + machine_type: MachineType, + update_counter: RwSignal, +) -> impl IntoView { + let update_machine_type_modal_open = RwSignal::new(false); + let delete_modal_open = RwSignal::new(false); let delete_action = { let id = machine_type.id; - create_action(move |_| async move { - delete_machine_type(id, update_counter, delete_modal_hidden).await + Action::new_local(move |_| async move { + delete_machine_type(id, update_counter, delete_modal_open).await 
}) }; view! { @@ -1036,28 +1060,30 @@ fn MachineTypeItem(machine_type: MachineType, update_counter: RwSignal) ->

{"Disk:"}{format!("{}GB", machine_type.disk)}

- +
- - + + } } #[component] pub fn NewMachineTypeView( - new_machine_type_modal_hidden: RwSignal, - update_counter: RwSignal, + new_machine_type_modal_open: RwSignal, + update_counter: RwSignal, ) -> impl IntoView { - let name = create_rw_signal(String::new()); - let cpu = create_rw_signal(String::new()); - let memory = create_rw_signal(String::new()); - let disk = create_rw_signal(String::new()); - let cost = create_rw_signal("0".to_string()); - let shared_cpu = create_rw_signal(false); - let cluster_info = expect_context::>>(); - let action = create_action(move |_| { + let name = RwSignal::new_local(String::new()); + let cpu = RwSignal::new_local(String::new()); + let memory = RwSignal::new_local(String::new()); + let disk = RwSignal::new_local(String::new()); + let cost = RwSignal::new_local("0".to_string()); + let shared_cpu = RwSignal::new_local(false); + let cluster_info = get_cluster_info(); + let action = Action::new_local(move |_| { create_machine_type( name.get_untracked(), cpu.get_untracked(), @@ -1066,78 +1092,86 @@ pub fn NewMachineTypeView( disk.get_untracked(), cost.get_untracked(), update_counter, - new_machine_type_modal_hidden, + new_machine_type_modal_open, ) }); - let body = view! { - -
-
- + +
+
+ +
+
- -
- - - -
- -
- }; - view! { - + + + +
+ +
+ } } #[component] pub fn UpdateMachineTypeModal( machine_type: MachineType, - update_machine_type_modal_hidden: RwSignal, - update_counter: RwSignal, + update_machine_type_modal_open: RwSignal, + update_counter: RwSignal, ) -> impl IntoView { - let name = create_rw_signal(machine_type.name.clone()); - let cost = create_rw_signal(machine_type.cost_per_second.to_string()); - let cluster_info = expect_context::>>(); - let action = create_action(move |_| { + let name = RwSignal::new_local(machine_type.name.clone()); + let cost = RwSignal::new_local(machine_type.cost_per_second.to_string()); + let cluster_info = get_cluster_info(); + let action = Action::new_local(move |_| { update_machine_type( machine_type.id, name.get_untracked(), cost.get_untracked(), update_counter, - update_machine_type_modal_hidden, + update_machine_type_modal_open, ) }); - let body = view! { - -
- -
- }; view! { - + + +
+ +
+
} } #[component] fn MachineTypeControl( - delete_modal_hidden: RwSignal, - update_machine_type_modal_hidden: RwSignal, + delete_modal_open: RwSignal, + update_machine_type_modal_open: RwSignal, align_right: bool, ) -> impl IntoView { - let dropdown_hidden = create_rw_signal(true); + let dropdown_hidden = RwSignal::new_local(true); let toggle_dropdown = move |_| { if dropdown_hidden.get_untracked() { @@ -1172,7 +1206,7 @@ fn MachineTypeControl( let delete_workspace_host = { move |_| { dropdown_hidden.set(true); - delete_modal_hidden.set(false); + delete_modal_open.set(true); } }; @@ -1202,7 +1236,7 @@ fn MachineTypeControl( class="block px-4 py-2 hover:bg-gray-100" on:click=move |_| { dropdown_hidden.set(true); - update_machine_type_modal_hidden.set(false); + update_machine_type_modal_open.set(true); } > Update @@ -1231,8 +1265,8 @@ async fn create_machine_type( memory: String, disk: String, cost: String, - update_counter: RwSignal, - new_workspace_host_modal_hidden: RwSignal, + update_counter: RwSignal, + new_workspace_host_modal_open: RwSignal, ) -> Result<(), ErrorResponse> { let cpu: usize = match cpu.parse() { Ok(n) => n, @@ -1287,7 +1321,7 @@ async fn create_machine_type( return Err(error); } update_counter.update(|c| *c += 1); - new_workspace_host_modal_hidden.set(true); + new_workspace_host_modal_open.set(false); Ok(()) } @@ -1295,8 +1329,8 @@ async fn update_machine_type( id: Uuid, name: String, cost: String, - update_counter: RwSignal, - update_workspace_host_modal_hidden: RwSignal, + update_counter: RwSignal, + update_workspace_host_modal_open: RwSignal, ) -> Result<(), ErrorResponse> { let cost: usize = match cost.parse() { Ok(n) => n, @@ -1324,14 +1358,14 @@ async fn update_machine_type( return Err(error); } update_counter.update(|c| *c += 1); - update_workspace_host_modal_hidden.set(true); + update_workspace_host_modal_open.set(false); Ok(()) } async fn delete_machine_type( id: Uuid, - update_counter: RwSignal, - delete_modal_hidden: RwSignal, + 
update_counter: RwSignal, + delete_modal_open: RwSignal, ) -> Result<(), ErrorResponse> { let resp = Request::delete(&format!("/api/v1/admin/machine_types/{id}")) .send() @@ -1346,7 +1380,7 @@ async fn delete_machine_type( return Err(error); } update_counter.update(|c| *c += 1); - delete_modal_hidden.set(true); + delete_modal_open.set(false); Ok(()) } @@ -1389,7 +1423,7 @@ async fn update_cluster_user( id: Uuid, cluster_admin: bool, search_action: Action<(), Result>, - update_modal_hidden: RwSignal, + update_modal_open: RwSignal, ) -> Result<(), ErrorResponse> { let resp = Request::put(&format!("/api/v1/admin/users/{id}")) .json(&UpdateClusterUser { cluster_admin })? @@ -1405,18 +1439,18 @@ async fn update_cluster_user( return Err(error); } search_action.dispatch(()); - update_modal_hidden.set(true); + update_modal_open.set(false); Ok(()) } #[component] pub fn ClusterUsersView() -> impl IntoView { - let page_size = create_rw_signal(String::new()); - let page = create_rw_signal(0); + let page_size = RwSignal::new_local(String::new()); + let page = RwSignal::new_local(0); - let error = create_rw_signal(None); - let user_filter = create_rw_signal(String::new()); - let search_action = create_action(move |_| async move { + let error = RwSignal::new_local(None); + let user_filter = RwSignal::new_local(String::new()); + let search_action = Action::new_local(move |_| async move { error.set(None); let result = get_cluster_users( user_filter.get_untracked(), @@ -1497,7 +1531,7 @@ pub fn ClusterUsersView() -> impl IntoView { @@ -1509,9 +1543,9 @@ pub fn ClusterUsersView() -> impl IntoView {
{ error }
- }.into_view() + }.into_any() } else { - view!{}.into_view() + ().into_any() }}
@@ -1537,7 +1571,7 @@ pub fn ClusterUsersView() -> impl IntoView { - impl IntoView { - + = a.num_pages)) class=("cursor-pointer", move || !users.with(|a| a.page + 1 >= a.num_pages)) @@ -1595,38 +1629,15 @@ fn ClusterUserItemView( search_action: Action<(), Result>, ) -> impl IntoView { let user_id = user.id; - let update_modal_hidden = create_rw_signal(true); - let is_cluster_admin = create_rw_signal(user.cluster_admin); + let update_modal_open = RwSignal::new(false); + let is_cluster_admin = RwSignal::new_local(user.cluster_admin); - let update_modal_body = view! { -
-
- -
- -
- }; - - let update_action = create_action(move |()| async move { + let update_action = Action::new_local(move |()| async move { update_cluster_user( user_id, is_cluster_admin.get_untracked(), search_action, - update_modal_hidden, + update_modal_open, ) .await }); @@ -1636,17 +1647,17 @@ fn ClusterUserItemView( - }, + }.into_any(), AuthProvider::Gitlab => view! { - }, + }.into_any(), }; view! { 0) > - +
{icon}

{user.auth_provider.to_string()}

@@ -1674,12 +1685,39 @@ fn ClusterUserItemView( - + +
+
+ +
+ +
+
} } diff --git a/crates/dashboard/src/component/alert.rs b/crates/dashboard/src/component/alert.rs new file mode 100644 index 0000000..fc9d1d4 --- /dev/null +++ b/crates/dashboard/src/component/alert.rs @@ -0,0 +1,81 @@ +use leptos::prelude::*; +use tailwind_fuse::*; + +#[derive(PartialEq, TwVariant)] +pub enum AlertVariant { + #[tw(default, class = "bg-card text-card-foreground")] + Default, + #[tw( + class = "text-destructive bg-card [&>svg]:text-current *:data-[slot=alert-description]:text-destructive/90" + )] + Destructive, +} + +#[component] +pub fn Alert( + #[prop(into, optional)] variant: Signal, + #[prop(into, optional)] class: MaybeProp, + #[prop(optional)] children: Option, +) -> impl IntoView { + let children = children + .map(|c| c().into_any()) + .unwrap_or_else(|| ().into_any()); + + view! { +
svg]:grid-cols-[calc(var(--spacing)*4)_1fr] grid-cols-[0_1fr] has-[>svg]:gap-x-3 gap-y-0.5 items-start [&>svg]:size-4 [&>svg]:translate-y-0.5 [&>svg]:text-current", + variant.get(), + class.get() + ) + data-slot="alert" + role="alert" + > + {children} +
+ } +} + +#[component] +pub fn AlertTitle( + #[prop(into, optional)] class: MaybeProp, + #[prop(optional)] children: Option, +) -> impl IntoView { + let children = children + .map(|c| c().into_any()) + .unwrap_or_else(|| ().into_any()); + + view! { +
+ {children} +
+ } +} + +#[component] +pub fn AlertDescription( + #[prop(into, optional)] class: MaybeProp, + #[prop(optional)] children: Option, +) -> impl IntoView { + let children = children + .map(|c| c().into_any()) + .unwrap_or_else(|| ().into_any()); + + view! { +
+ {children} +
+ } +} diff --git a/crates/dashboard/src/component/alert_dialog.rs b/crates/dashboard/src/component/alert_dialog.rs new file mode 100644 index 0000000..56d6393 --- /dev/null +++ b/crates/dashboard/src/component/alert_dialog.rs @@ -0,0 +1,172 @@ +use leptos::prelude::*; +use tailwind_fuse::*; + +#[component] +pub fn AlertDialogTrigger( + #[prop()] open: RwSignal, + #[prop(optional)] children: Option, +) -> impl IntoView { + let children = children + .map(|c| c().into_any()) + .unwrap_or_else(|| ().into_any()); + + view! { +
+ {children} +
+ } +} + +#[component] +pub fn AlertDialogContent( + #[prop()] open: RwSignal, + #[prop(into, optional)] class: MaybeProp, + #[prop(optional)] children: Option, +) -> impl IntoView { + let hide_delay = std::time::Duration::from_millis(100); + let handle: StoredValue> = StoredValue::new(None); + let show = RwSignal::new(open.get_untracked()); + + let eff = RenderEffect::new(move |_| { + if open.get() { + // clear any possibly active timer + if let Some(h) = handle.get_value() { + h.clear(); + } + + show.set(true); + } else { + let h = leptos::leptos_dom::helpers::set_timeout_with_handle( + move || show.set(false), + hide_delay, + ) + .expect("set timeout in AnimatedShow"); + handle.set_value(Some(h)); + } + }); + + on_cleanup(move || { + if let Some(Some(h)) = handle.try_get_value() { + h.clear(); + } + drop(eff); + }); + + view! { + +
+
+ {children.clone().map(|c| c().into_any()).unwrap_or_else(|| ().into_any())} +
+
+ } +} + +#[component] +pub fn AlertDialogHeader( + #[prop(into, optional)] class: MaybeProp, + #[prop(optional)] children: Option, +) -> impl IntoView { + let children = children + .map(|c| c().into_any()) + .unwrap_or_else(|| ().into_any()); + + view! { +
+ {children} +
+ } +} + +#[component] +pub fn AlertDialogFooter( + #[prop(into, optional)] class: MaybeProp, + #[prop(optional)] children: Option, +) -> impl IntoView { + let children = children + .map(|c| c().into_any()) + .unwrap_or_else(|| ().into_any()); + + view! { +
+ {children} +
+ } +} + +#[component] +pub fn AlertDialogTitle( + #[prop(into, optional)] class: MaybeProp, + #[prop(optional)] children: Option, +) -> impl IntoView { + let children = children + .map(|c| c().into_any()) + .unwrap_or_else(|| ().into_any()); + + view! { + + {children} + + } +} + +#[component] +pub fn AlertDialogDescription( + #[prop(into, optional)] class: MaybeProp, + #[prop(optional)] children: Option, +) -> impl IntoView { + let children = children + .map(|c| c().into_any()) + .unwrap_or_else(|| ().into_any()); + + view! { + + {children} + + } +} diff --git a/crates/dashboard/src/component/avatar.rs b/crates/dashboard/src/component/avatar.rs new file mode 100644 index 0000000..22170ad --- /dev/null +++ b/crates/dashboard/src/component/avatar.rs @@ -0,0 +1,41 @@ +use leptos::prelude::*; +use tailwind_fuse::*; + +#[component] +pub fn Avatar( + #[prop(into, optional)] class: MaybeProp, + #[prop(optional)] children: Option, +) -> impl IntoView { + let children = children + .map(|c| c().into_any()) + .unwrap_or_else(|| ().into_any()); + + view! { + + {children} + + } +} + +#[component] +pub fn AvatarImage( + #[prop(into, optional)] class: MaybeProp, + #[prop(into, optional)] src: MaybeProp, +) -> impl IntoView { + view! 
{ + + } +} diff --git a/crates/dashboard/src/component/badge.rs b/crates/dashboard/src/component/badge.rs new file mode 100644 index 0000000..5f2a4be --- /dev/null +++ b/crates/dashboard/src/component/badge.rs @@ -0,0 +1,44 @@ +use leptos::prelude::*; +use tailwind_fuse::*; + +#[derive(PartialEq, TwVariant)] +pub enum BadgeVariant { + #[tw( + default, + class = "border-transparent bg-primary text-primary-foreground [a&]:hover:bg-primary/90" + )] + Default, + #[tw( + class = "border-transparent bg-secondary text-secondary-foreground [a&]:hover:bg-secondary/90" + )] + Secondary, + #[tw( + class = "border-transparent bg-destructive text-white [a&]:hover:bg-destructive/90 focus-visible:ring-destructive/20 dark:focus-visible:ring-destructive/40 dark:bg-destructive/60" + )] + Destructive, + #[tw(class = "text-foreground [a&]:hover:bg-accent [a&]:hover:text-accent-foreground")] + Outline, +} + +#[component] +pub fn Badge( + #[prop(into, optional)] variant: Signal, + #[prop(into, optional)] class: MaybeProp, + #[prop(optional)] children: Option, +) -> impl IntoView { + let children = children + .map(|c| c().into_any()) + .unwrap_or_else(|| ().into_any()); + view! 
{ + svg]:size-3 gap-1 [&>svg]:pointer-events-none focus-visible:border-ring focus-visible:ring-ring/50 focus-visible:ring-[3px] aria-invalid:ring-destructive/20 dark:aria-invalid:ring-destructive/40 aria-invalid:border-destructive transition-[color,box-shadow] overflow-hidden", + variant.get(), + class.get() + ) + data-slot="badge" + > + {children} + + } +} diff --git a/crates/dashboard/src/component/breadcrumb.rs b/crates/dashboard/src/component/breadcrumb.rs new file mode 100644 index 0000000..513e719 --- /dev/null +++ b/crates/dashboard/src/component/breadcrumb.rs @@ -0,0 +1,136 @@ +use leptos::prelude::*; +use lucide_leptos::ChevronRight; +use tailwind_fuse::*; + +#[component] +pub fn Breadcrumb(#[prop(optional)] children: Option) -> impl IntoView { + let children = children + .map(|c| c().into_any()) + .unwrap_or_else(|| ().into_any()); + + view! { + + } +} + +#[component] +pub fn BreadcrumbList( + #[prop(into, optional)] class: MaybeProp, + #[prop(optional)] children: Option, +) -> impl IntoView { + let children = children + .map(|c| c().into_any()) + .unwrap_or_else(|| ().into_any()); + + view! { +
    + {children} +
+ } +} + +#[component] +pub fn BreadcrumbItem( + #[prop(into, optional)] class: MaybeProp, + #[prop(optional)] children: Option, +) -> impl IntoView { + let children = children + .map(|c| c().into_any()) + .unwrap_or_else(|| ().into_any()); + + view! { +
  • + {children} +
  • + } +} + +#[component] +pub fn BreadcrumbPage( + #[prop(into, optional)] class: MaybeProp, + #[prop(optional)] children: Option, +) -> impl IntoView { + let children = children + .map(|c| c().into_any()) + .unwrap_or_else(|| ().into_any()); + + view! { + + {children} + + } +} + +#[component] +pub fn BreadcrumbSeparator( + #[prop(into, optional)] class: MaybeProp, + #[prop(optional)] children: Option, +) -> impl IntoView { + let children = children + .map(|c| c().into_any()) + .unwrap_or_else(|| view! { }.into_any()); + + view! { + svg]:size-3.5", + class.get() + ) + data-slot="breadcrumb-separator" + role="presentation" + aria-hidden="true" + > + {children} + + } +} + +#[component] +pub fn BreadcrumbLink( + #[prop(into, optional)] class: MaybeProp, + #[prop(into, optional)] href: MaybeProp, + #[prop(optional)] children: Option, +) -> impl IntoView { + let children = children + .map(|c| c().into_any()) + .unwrap_or_else(|| ().into_any()); + + view! { + + {children} + + } +} diff --git a/crates/dashboard/src/component/button.rs b/crates/dashboard/src/component/button.rs new file mode 100644 index 0000000..9eaeab6 --- /dev/null +++ b/crates/dashboard/src/component/button.rs @@ -0,0 +1,186 @@ +use leptos::prelude::*; +// use leptos::{ev::MouseEvent, prelude::*}; +// use leptos_node_ref::AnyNodeRef; +// use leptos_struct_component::{struct_component, StructComponent}; +// use leptos_style::Style; +use tailwind_fuse::*; + +#[derive(TwClass)] +#[tw( + class = "inline-flex items-center justify-center gap-2 whitespace-nowrap rounded-md text-sm font-medium transition-all disabled:pointer-events-none disabled:opacity-50 [&_svg]:pointer-events-none [&_svg:not([class*='size-'])]:size-4 shrink-0 [&_svg]:shrink-0 outline-none focus-visible:border-ring focus-visible:ring-ring/50 focus-visible:ring-[3px] aria-invalid:ring-destructive/20 dark:aria-invalid:ring-destructive/40 aria-invalid:border-destructive" +)] +pub struct ButtonClass { + pub variant: ButtonVariant, + pub size: 
ButtonSize, +} + +#[derive(PartialEq, TwVariant)] +pub enum ButtonVariant { + #[tw( + default, + class = "bg-primary text-primary-foreground shadow-xs hover:bg-primary/90" + )] + Default, + #[tw( + class = "bg-destructive text-white shadow-xs hover:bg-destructive/90 focus-visible:ring-destructive/20 dark:focus-visible:ring-destructive/40 dark:bg-destructive/60" + )] + Destructive, + #[tw( + class = "border bg-background shadow-xs hover:bg-accent hover:text-accent-foreground dark:bg-input/30 dark:border-input dark:hover:bg-input/50" + )] + Outline, + #[tw(class = "bg-secondary text-secondary-foreground shadow-xs hover:bg-secondary/80")] + Secondary, + #[tw(class = "hover:bg-accent hover:text-accent-foreground dark:hover:bg-accent/50")] + Ghost, + #[tw(class = "text-primary underline-offset-4 hover:underline")] + Link, +} + +#[derive(PartialEq, TwVariant)] +pub enum ButtonSize { + #[tw(default, class = "h-9 px-4 py-2 has-[>svg]:px-3")] + Default, + #[tw(class = "h-8 rounded-md gap-1.5 px-3 has-[>svg]:px-2.5")] + Sm, + #[tw(class = "h-10 rounded-md px-6 has-[>svg]:px-4")] + Lg, + #[tw(class = "size-9")] + Icon, +} + +// #[derive(Clone, StructComponent)] +// #[struct_component(tag = "button")] +// pub struct ButtonChildProps { +// pub node_ref: AnyNodeRef, + +// // Global attributes +// pub autofocus: Signal, +// pub class: Signal, +// pub id: MaybeProp, +// pub style: Signal + + + Your ClusterLapdev-Kube-ManagerLapdev EnvironmentLapdev EnvironmentLapdev EnvironmentLapdev Sidecar ProxyApp Workload PodApp ContainerDevboxDevbox intercept traffic from clusterLapdev EnvironmentLapdev Devbox ProxyLapdevDevbox client traffic to clusterDevbox client traffic to clusterApp WorkloadPreview URLPreview URL traffic to clusterPreview URL traffic to clusterApp WorkloadDevbox intercept traffic from cluster \ No newline at end of file diff --git a/docs/.gitbook/assets/file.excalidraw (2).svg b/docs/.gitbook/assets/file.excalidraw (2).svg new file mode 100644 index 0000000..fe752e9 --- 
/dev/null +++ b/docs/.gitbook/assets/file.excalidraw (2).svg @@ -0,0 +1,8 @@ + + + eyJ2ZXJzaW9uIjoiMSIsImVuY29kaW5nIjoiYnN0cmluZyIsImNvbXByZXNzZWQiOnRydWUsImVuY29kZWQiOiJ4nO19a1dcIk2y7vf5XHUwMDE1vfp8nJGd96ycs/ZcdTAwMDdcdTAwMDRcdTAwMWGxLVx1MDAxMFx1MDAwNWk8a69eUCByXHUwMDEzW0Gg9pr/flwiXCJLXHUwMDAxXHUwMDA1RdtbvzP9rlx1MDAxOaSoS17iiXjikln/+7cvX76O55ftr//88rU9i1x1MDAxYYNu66ox/fpcdTAwMGY8ftO+uu6OLuAnQd+vR5OriM48XHUwMDFmjy+v//lf/9W4vEx1uuPmaNRPRaOhv6w9aFx1MDAwZttcdTAwMTfjazjx/8H3L1/+l/5/6UHdYaPTppPp8OI5RrH7R4ujXHUwMDBieiZcdTAwMGaccIJxXHUwMDFi3J3Rvc7Cs8btXHUwMDE2/HzWXHUwMDE4XFy3XHUwMDE3v+Chr7Y1uVx1MDAxY+Rcbmc3qsaOz1x1MDAwM9s6aZ12XHUwMDE2jz3rXHUwMDBlXHUwMDA2x+P5gJp0PYKeL367XHUwMDFlX4367Vq3NT6/7f7S8U1XXY0mnfOL9jV2nd9cdTAwMWRcdTAwMWRdNqLueI7H2KJ7jYtcdTAwMGXdY3FkXHUwMDA235RcYlLOXHUwMDE5aVx1MDAwMiFcdTAwMDLrmL77XHUwMDE1r99cdTAwMTFapKyw0jIlJdeBvtewzGgwusKGja9cdTAwMWFcdTAwMTfXl40rmIdF85qNqN+BNl60XHUwMDFlP2+adFuJ1OJcdTAwMDHn7W7nfHz/6HWbRp9LXHUwMDExXHUwMDE4q5VcXExcdTAwMGU+6LLQXCIp+J/FmF81hu1cdTAwMDJecjFcdTAwMTlcZpZcdTAwMDfuopVcZtzKXHUwMDBmTfwhtyRQi1tNLltccj/33Fx1MDAxYaZUoFx1MDAwMqaWRmTQvejfv91gXHUwMDE09deIy/W4MZ7g7b9eti9a3YtVIfHt/WpYk1lcdTAwMTM14VEsUG3t2la1RFPxs0Bx7VpRu9VmrdayXHUwMDEwXHUwMDAxnNp3KMB/O3dS8eVcdTAwMGJP/vpcdTAwMWb6/Nc/1kPlqlx1MDAxZI29qKyBi1BqI1xcLEyG1fJcdTAwMTlwXHUwMDE5Xn0rXFyyi2Bnr9pcdTAwMWGe8+Dgss3231x1MDAwYi7sRXCRRqWEdsZcdMWEXHUwMDE1JlD38GJsylx1MDAxOM1cdTAwMDVjxsLvwmxcdTAwMDLM/+Ft/O/lYDHGpZxyXHUwMDAx/HNKMFx1MDAxMTxEjlQqJaWGmZGOXHUwMDE5+Vx1MDAwMEWg3bTRlkv1+yj63zs5u1x1MDAxNSWZXHUwMDFj+df7gOtRqVx1MDAxZbdn47VcdTAwMDJcdTAwMWSIjVx1MDAwMs2cdUJcYse2XHUwMDE2aMfYzixbyZ/VMzNdmp1x0Tz98bn1v3RBKpBKXG5hXHUwMDAxvEbr+1x1MDAwMs1Uilx1MDAwYsGNkVx1MDAwZcyA1Vx1MDAxYi3Ab1x1MDAwYjRcZnhKcrDIRqyxXHUwMDAyQj6wXHUwMDAxWoFgv470foBccjhcdTAwMWJdjI+7cZtwuHL0W2PYXHUwMDFkzFfEgORcdTAwMTdcdTAwMDawXHUwMDBljOhLZjC5XHUwMDFlt6++rvyaXHUwMDFldDso0V9cdTAwMDfts1VRXHUwMDFmd8Es3P08XHUwMDFlXS5+jeBpje5F++rh6Iyuup3uRWNQefTJ0N/23u1cdTAwMTTxXHUwMDE010uzf93GX/G4e7nFXHUwMDAxadhcYlBQbFpwxrdcdTAwMDdozdXMXFxcXFx1MDAxY9tcdTAwM
GL5LVx1MDAxN7mLwfWOTL8qQFuN6/P2q1wilIOOTjlpWFx1MDAxMCggaDK4R9EksymluJKAX2WMejuLY4VMSaUld9qBonBcdTAwMGLcLXE1XHUwMDBidHLp333QauugQ+5cdTAwMTVo24tcZs7dNYur74RjUDrcVTe783LYyt6UxHHjvFpejNaKyDaurkbTr3e//OuWWb2XOdvoz4glXHUwMDA03odcdTAwMGLMXHUwMDFhMPxli/dcdTAwMTRcXHY7vFx1MDAxMeSmw+5cXDUze2w/bs1U43PbM83Bn1x1MDAwMd9Agk/jNNN2XHUwMDE1LGDKUlZcdTAwMDXCKSOcXHUwMDA0s/am/oxYg5ClY7csXGZcZl9cdTAwMTBYqcRaUCzEfJD5se9cdTAwMGUnZ1dcdTAwMTOrxunvs0a4X3JLXHUwMDAy+jV98k2keflqXHUwMDE0XFzmXHUwMDA3de3mwc+Dg69cdTAwMGaE83Paw618XCLebkqtuFx1MDAwMOdcXFx1MDAwYttcdTAwMTTAXHUwMDE1VCRcXNM5LlwiwzlcdTAwMGJcIlx1MDAwZX6hfcwnerZLtJE86o1gXHUwMDBihOIu4Esq8imsXHUwMDE1psG3XFwnXHUwMDE47Vx1MDAwZlx1MDAwN0F08TM/alx1MDAwZlx1MDAwZYefXHUwMDFia0qAXHUwMDA3wlx1MDAxY1xmulxm1mDNiVx1MDAxNLhDgdCgdVx1MDAxZYPab9sl5Kg6YMxcdTAwMDZ8O+ZcdTAwMTjAzFxiJdm/Nd5exD9cdTAwMGZcdTAwMWGXrfbNzvdJs71cdTAwMTM2LsBcbr0zXHJ9tFx1MDAwMW/ORqXYzEZcclx1MDAwMMCAq71AwVOQ/8XKefYtV83HbWZtITrKTc3ruouvz0bBXHUwMDBifNS+SlxmJ25hX39cdTAwMWb0JlhcdTAwMTcsXGbgKFv6t+jlLVx1MDAwMVxyQFc5uzSR68H/eth+daKabY+/22FzZ6dcdTAwMTbt1ic71SAzulx1MDAxMttcdTAwMTHVfzx231attn/YKcSNLj+pXHUwMDFkXHUwMDBitXMhfl69XHUwMDA2XHUwMDAx5lx1MDAwMVx1MDAwYsBcbogl3+Q3XHTw4yjlXHUwMDFig/pWXHUwMDE5cKK03D5IeVx1MDAxMJSuyoN+/ftkf3iROZyE9evq69rl11x1MDAwNyn0MqWMXHUwMDE0ylx1MDAxOVx1MDAwMV7hXHUwMDEyXGJcdTAwMTJcdTAwMTKsUpxJ44RcdTAwMTZMcyv526FUsVx1MDAxNZdRrmHE2tzHKJfAnVx1MDAwMqM3MOKPQt9aPrgkKt9//TR75X2pr4P44rB+fSR+nDe3XHUwMDA131x1MDAxZuR9buLDktlNuFx1MDAxM8xcdFx1MDAwM1x1MDAwMue2XHUwMDA33vrR/OTAsypcdTAwMDWeiLDGcIFjflx1MDAwZnjMpsz7XHUwMDAwj1x1MDAwNzKlXHUwMDE0mDm9ZTTVXHUwMDA02ijH2Hq7+Fx1MDAxNyazX3JcdTAwMTc33avRxXBlXHUwMDFjV7hsXHUwMDA0Py3zzFx1MDAwN2x22G21lm3RKqF9yoas57jr2/X2XHUwMDE095FcdTAwMTRcdTAwMWbyPlx1MDAwZZP0jFxi0jBto1ncOc33vkXFvUb6+PtR9bODWNlcdTAwMTRjgVxitHWCc35cdTAwMGbE4PSCx2uZXHUwMDBiwLrCKW+XXHUwMDEweaHxxFx1MDAxOLFyXHUwMDFml9R7mfE8d+e/Lopn573RbvPXSbvfjLujwWc0cmpj0MdyXHUwMDAzZMvJ7fNcdTAwMTHrO/3J4WF1SmmwXcJaK4Gl3SeXYOKW4KHcm+Hj2TaOa1x1MDAwNlxyXHUwMDEzlv+ZVSOf28g9oerf28htwi+3XHUwMDFiSSqodcYkXHUwMDBinkFS9399T3f3jn5Ee
zaY9E1mNP7Vv/7cUVtcdTAwMGXmJFx1MDAxNVhnMWhrLb9PUkWARSMgqlx1MDAxYXww52wg7zXtXHUwMDAzXHUwMDAxLJx4JFPy743fN1xyuH5cdTAwMTBcdTAwMTl1mt8/uijP1CxcYlx1MDAwMrnksDxcdTAwMTlvvdm5zFxcy72ofjLJsNFRfbhbO/3c9WaATptyVijJueVcdTAwMWM44FxuWFx1MDAxNVMpwZ3Sylx1MDAwMFTBs3wzqCrlUtbYwFx1MDAxYSXAt5Vrqs1cdTAwMTQzKWbAzVx1MDAwN+qpwcN9WLRcdTAwMTnoQMlA86dcdTAwMTIvwaDWY/FBz7D2vDLpmL0mu8h9jtjrfqum+pPuz/71ZTpdj9uz9vn37OdcZtO0XHUwMDA3g+7l9VpcXFx1MDAwNXpzXHUwMDFlw2omnOPB9rj6UVx1MDAxNFfXrU71wF5kTnfKhdNas3DyyXGljUyBQ8U041hdK1aNoNI6ZYUzOC9gL9VcdTAwMWLaQDBpKSZcdTAwMDBVMOZcdTAwMTJcXM91djAwK6c8qCBcdTAwMDBcdTAwMGbRYqB3vcf3PrhcdTAwMTJP4+rjy2OW69vvy700Mlx1MDAxMNY8g/xd/mrHv+z+/OJsYupR++TIXHUwMDFkXFypT07+jLMpXHLUTnBltOD3isk00yluYS7Aj1x1MDAwNZ4l7jfsQ8pjXHUwMDAwnFjf9lR1zNS4ecSiwXg2UzW3e1x1MDAxM+/0omXN/LXQXHUwMDE24uTn9bhYaX1vN6rpefP7JLd8wuvB4z/lM1+e9sSs25imXHUwMDAz4ZQmQCncXHUwMDFhjNdBMNhJp7+FY3VcdTAwMWVe/jLyVN5MPjtcdTAwMTi5TIHrwyzjzlx1MDAxOGVXk+laXHUwMDA2KThcdTAwMDMmLFx1MDAxMGCPg2BcdTAwMTNcdTAwMTh/31x1MDAwZlx1MDAwM4/QbF94bVx1MDAwMuYsNPrdrM3jcHqMrp1fVUrzm/J+e9wql3dNS4ZsLF8hVd5pld1RUG/J3sWvY53m15NcdTAwMWY/dj+MXHUwMDA2voqHedxttaPG1ZfDq9Fs/iE+5oZcdTAwMTa8ZTQo2JyyXHUwMDA05Fx1MDAwMYT1c1KWw9bOwbRcdTAwMTBWJvssPi1cZlx1MDAwZpqX48Fnj1x1MDAwNlx1MDAxOWFT5LOBY1x1MDAwNnZ/XHUwMDExrcXrrVx1MDAwNq+PSbDAhlx1MDAxYmXeMNfBjUltv/JDMumAXHUwMDE1PFnB90db9Fx1MDAxN4E5fXn5pTa66lx1MDAwZkaN1pfDUet9gbz56a9cdTAwMDHizbRebXZnpZXMXHUwMDAxzdk+Zzlvn8yPXHUwMDBmerX2oLq3XHUwMDFi7Dd6vW/fdz47ip1LKfDZmcFaW/BcdTAwMTVXYGyUSjmhncXVXHUwMDE3gJqN3uw70nqtmMNavKd81mwmf3iZb1x1MDAwN9PS0Fx1MDAxNPuXp7PJ5Egvs/Zql/dys3R9b79cXNurjFx1MDAwZXI/09+KXHUwMDFmQevfiIe8JV/4XHUwMDEznYbNXHUwMDFlvFLa8pU1t09BfSZcdTAwMDdB1Ds6qVx1MDAxY11l5sXzfXVZP1x0PjvUlUkx5lx1MDAwMi7BL1x1MDAwMLu96sFcdTAwMWJrUkKDWFjNlVPiXHKTN0BcdTAwMWOAXHUwMDE2SLFt1T1oJuBcdTAwMTj2XHUwMDFko79/ktHO3FrZ97fYa1x1MDAxZf1cdTAwMWHm2iuyNVx1MDAxOFx1MDAxNlLL+4dcdTAwMTf2WohcdTAwMDA84meEn1x1MDAxZtetr1JEMVx1MDAxYY83XHUwMDE1Ubww/mw5T0lQp05pUKGWrfJubcWK6/92NVx1MDAxNFxmXHUwMDFjf1x1MDAxMVx1MDAwMJ3mWjpnzbpcdTAwMWEjw1LCgipxTnInlVnDx3HhuFx1M
DAxMlx1MDAxYnbjeFx1MDAwYmh/eFx1MDAwMHrZhl6Nd7vefK40LNmXprBFfIp0RESmeIeBelx1MDAxN5xcdTAwMDE/XHUwMDEyknGG4rFcYvnj0DYusd08tbLOgatcdTAwMDf9XHUwMDA3k/50u1x1MDAxZWe799pcdTAwMTXwQFx1MDAxOWhcdTAwMTKMmeZLe1x1MDAwMSxaJVJMXHUwMDA2MuBcZlgmM4F4mG5cdTAwMWI0rseZ0XDYRUxcdTAwMWSOulx1MDAxN+P7XHUwMDAzTSOaRu1x3m48kFx1MDAwMujV8m8g3t17+eNLvOmq5C3++rJAXCJ9ufv7f/6x9uydzVxiwX9cdTAwMGawsbjfVjxno09jzebIXHUwMDA07sZgnrUzUftcIpNcdTAwMWJcdTAwMDblulx1MDAxOLGzSdNcck5O641XXmr0+jqSOVx1MDAwYlwiXHUwMDA3o44lp87dy9HBaKe00ODfSVx1MDAxMLNlXHUwMDA348N8XHUwMDFhw2g/XG77lE9ja46dfVx1MDAxZk92j49+lLqdXHUwMDFkpXv61zt7JDzsjWf5w2/C3pwpd9BcdTAwMTmMw1xm+1M8krNcYkNSoFx1MDAxYVXUtFLbpjFcdTAwMTF3RjVcdTAwMWGgoiR8aVx1MDAwMTV2wft4JI9sIeOcYUyrZ3gkXHUwMDE13f1Wm4wnreF8dlCY/eKNXHUwMDFmh6+7J9JbXHUwMDAw1aqUXHUwMDEwQlO1tLxfpOJMsLJBhd2E1N/fXHUwMDEyXHQ8kmc4I1x1MDAxYXzFp7KK71x1MDAwNdXP44xk2zfN0ex9vZD7z3xb98NcdTAwMDSPVFx1MDAwMVx1MDAwMJNcdTAwMDJE24Vz/lx1MDAxNGJcdTAwMWZPXHUwMDA1PVx1MDAwM7H3LdhbXCJcdTAwMTbUUkpcdTAwMDTSXHUwMDA2Vlx1MDAwYifcarhcdTAwMTCofFxu/HQsK8ON//hmwIoz11bq5YCVKlx1MDAwMC+I4+pcdTAwMDZhrXOLUb+DLktp0CnWXHUwMDE4Z7mGP+TDXHUwMDA1XHUwMDBm1mhjhX2TPWVe7lVwx8G7U8tbXHUwMDE0fqBXwVK4uEvCQIFXwUFLijUuhVx1MDAwNC5cdTAwMWKA+21cZlx1MDAxOFVpbfCg8ysuxWo//jBav1H06PJcdTAwMDdC90xav3H1SLA58uFcdTAwMTjuv1x1MDAxOVx1MDAwNNtvIFDOnOf17rdiJmxcXJyWXHUwMDBmXHUwMDBlv5e67pPvN8qBnqWwrlxcg1x1MDAxOIKTu0Az5Vx1MDAxYi0wftowxGqLfOLtNFx1MDAwZlx1MDAwN1x1MDAxZKe33zJEcGBxTNjX2LnqRUxcdTAwMDDUXHS0XHUwMDE11Mlirt6HXHR86eK6kKh9Of5cdTAwMDIjenbWjb6cXY2GX6KP2IXumU16W1x1MDAxZWGDjS66tMB6MSizNZZcdTAwMWYvLP6cRbSYdZRcbvQn0Fx055xdpVx1MDAxMWBAUoFcdTAwMDMlKlxmKFMn347385TTxoA2wa1OQVmvcdYl0yltXHUwMDE4tIdcdTAwMGJMjKhcdTAwMDc0gqZcdTAwMGLXTn4uXHUwMDFh8S7BycdXRnxZXHJcdTAwMDJqKVx1MDAxNG4q61xmSDnjbuk0zyOESUlcdTAwMDHkRzCwniBcdTAwMTjKPs4jNrWqfbJ/fVx1MDAxMVaO8lrvTHaap4P6WbQxNMk0mHHQklx1MDAwMbQqUEbwh83iKVx1MDAxMEUhcd2IlIFxXHUwMDBmWvVcdTAwMDJcdTAwMGWzhqfcYzIr178uh9ko9/TrQ5F/Jol58Tpxy3Df3OBcdTAwMTlcdTAwMWJznp9cdTAwMGbNaU9cdTAwMWXmfl1cdTAwMWTu//qlLGveXHUwMDFjvm7Y4212WdFcdTAwMDZcdTAwMTikxuWwdjXqsSNcd
TAwMDRLYVx0tVx1MDAwMz9cdTAwMGIhzDdcdTAwMTZTf9BCccellCwwf9YmK9ncdf1bL3OUrpfyhZtoXFzfPXeNf6dNVlx1MDAxZcGdY1x1MDAwZVx1MDAxYzdjtlx1MDAwZjeuXHUwMDFmzU+OO9xkhWlcdTAwMTdcYnDdtFx1MDAxM/dgh0vi3lx1MDAwN3bPX3+Or1DguID1g5yIz7x+9XfXnz9hQt57XetGt4E/tomgXHUwMDAyn1x1MDAxOFj1M/Yne1xcpX1Ov1x1MDAwMVxmXHUwMDBmeFx1MDAwNi5cdTAwMDBcblx01mp5yZXfOFx1MDAxNFxyp+FcdTAwMWOtKphV94ZcdTAwMGJcdTAwMWYkqFx1MDAxMq1cdTAwMDIpsKJcdTAwMThcYtRcdTAwMDKyS1x1MDAwMUhhXHUwMDE5gzNcZi6/XHJcdTAwMDKh1uwroVx1MDAwNWiaXHKVTX9t1+HxLdXvk3TwXHUwMDE3XHUwMDE0N1x1MDAxMmPLTi6Zslx1MDAwNUmXKSaBpOM4M1x1MDAwM+TkZa7D4zsufVmOi1xuhoFRXHUwMDExMOCrXCJcdTAwMDBS9KBNXFykOMhcdTAwMDbaXHUwMDEy2jtQPPRn/jjPYaPg0/VcdTAwMGZF/pmuw8ayXHUwMDA2Ldz9o7fKTzvnhHrGcovoV/W0fVrIdVx1MDAwZXfyO8BkJj+D7uvunvPq4U9cdTAwMTjNXHUwMDE0rq/WgmvQ9feKN2G8U87CZGBmhrFH1lu8SlGDXHUwMDE0WNuy+LdUc7eIm2w+Z/FcdTAwMTKZXHUwMDAwe1x1MDAxMjyVSu1HR43r42Zo2P5wwKuTq++7s8pcdTAwMDekUp+zXHTpMnyfrkuIWrqhOYs4a7ZVk8t2q1x1MDAxMVx1MDAwNfLsTERGtlxm8ETXalnN9ZJcdTAwMWP+fl3CRqhcdTAwMDVic65cdTAwMDFcdTAwMDZDm2e8XHTgtHtzwC+uRq3hJFcsXHUwMDE081x1MDAwN3HTxp++LFx1MDAwMfg5Y0wway1cdTAwMTOriytcdTAwMTXWXHUwMDE2cVx1MDAwYm5cdTAwMDTu11xi9vtt31x1MDAwNCB5yljAvFZSSVxyrvo6qG0+55ZnsFx1MDAwMLdcdTAwMDBQ+qlcdTAwMDKjRmt6XHUwMDE4nfys7VROWPA9fZG29ai5bP7++lhk6lxmX1x1MDAxMcdaZ2fmrFx1MDAwNVx1MDAwMi9cIqngII9aZ/Is4i3ZZlHbmXepXHUwMDEx0nKj1ePOciu04dvbPfatdJ7fuTrVvbk06XqmVylcdTAwMWZ/9lx1MDAwNUrMWCBcdTAwMTXglGOaXHUwMDEzXGbg6pbEXHUwMDFh3/HnJNXkYFx1MDAxNOvtVi1cdTAwMTidMlx1MDAxY1x1MDAxZYLVSFx1MDAwZiG4xmW3XHUwMDFjTlx1MDAxNtyt5/Z/XHUwMDExRP2GZ/+++b/7z3xbT124jbBcdTAwMDV/T2B0aftkPb9q/TpcdTAwMWT+XG6KP3O5bq90VctVjl6UrH/POiHmXHUwMDA0bvtkuTHgsnF1r7JcdTAwMGbf2KZccr4mlUl01zbC9oxcdTAwMDVcdTAwMTFjL4ctT0lcZlx1MDAwNOCuddYpztZcdTAwMTRcbokgSFx02jVCXHUwMDAzdWYr71x1MDAxZUjKc6GdwvLX2P3x1Vx1MDAwYoWAXHUwMDFjstdw0+/X1PxFqnU2zj/9+nDqXHUwMDE3N9zKbm9WXHUwMDAxakmo76tcdTAwMDBuXHUwMDE4Lp9fYOJJXHUwMDE18GjN9SdVXHUwMDAxnLtcdTAwMTSMPLNcdTAwMDJcdTAwMTSA42xVXHUwMDA1XHUwMDE4IVLgtjrBXHUwMDA1kFx1MDAxY+M270D3u1x1MDAxNTvAjpVRXHUwMDFjrLelXCK1dVx1MDAxNFo4k8JdKy2oJNzf6IFcb
rBcdTAwMDFKXHUwMDBm27Dq+INrXHUwMDA1XHUwMDAz+WI+vJ1cbtiYaH90dciXRbSMp5hcdTAwMTbKwqRLXHUwMDE4Y1x1MDAxMImlc5JYmVx1MDAwNoughFx1MDAwMTZcIjRT9lWS7Fx1MDAxZql6NotcdTAwMWT+eyBwz1Q8m1x1MDAxY4bldXhcdTAwMGZKi7hcdTAwMDbP1iyoyZP7lJ38tOPeJJ++zOVcdTAwMGb33ayjOrXoc/tcdTAwMGKOw7hcdTAwMDcmXHUwMDAwd1gwKdxqnExwlULUWPCUXHUwMDE1kHOxubTod4mHMDyFL+jb0l3QXHUwMDAx7lx1MDAwMP4qRUS/USWIy1x1MDAwMd/NXHJISvKiQVx1MDAxN9p2V483XHUwMDFlfWiB4Fx1MDAxNq15W9fhXHUwMDExXHUwMDAwY0k4W651f1xuv4+/l+nT0lx1MDAwNo7vSdDMclx1MDAwNrQsWHX4XHUwMDE1YykmXHUwMDFjw53ErF1+a+lrXHUwMDAzWCuZslx1MDAwMpcxcCY0l2s8XHUwMDA3fI+hXHUwMDAy1OKroTBIq1x1MDAxZizsXHUwMDAzLWSlwjdcdTAwMTJ9Qt4gJVx1MDAwZtiyWL9cdTAwMWJvePxcdTAwMDV2X1aybMrhS05x11xubtak/UBYeCBBYDhmP7TWfzpv2NkoePTrQ5l7JeZcdTAwMDCka5PiwddTXHUwMDE4bZ+zwuD0/EcgZ7Vvolx1MDAxZv0s6PqsXHUwMDFknYv856ZcdTAwMGVcdTAwMDGz6K9cdTAwMDRKuVx1MDAwMNOJq9TBWpOS+L57dGjcyl66XHUwMDFmy1x1MDAxY1x1MDAwNHNcXFx1MDAwNSBcdTAwMGJcdTAwMWa1t72nXHUwMDBlglx1MDAwN4sh+Vx1MDAwZnV4O+qwMW0nXHUwMDFm2aPYSHwxkXvGa4W5dmdX0/Zl+/Dqm6rs6ur3TqP7uVx1MDAwMcyNpZd4Slx0Wlx1MDAxMyOL91x1MDAwYmuZSlx1MDAwNY6BV1x1MDAxNuCGZlwi2Mj+33M7M1AqXHUwMDBly57Xondh7ibVQWb8y7F847A8Pa43MtPvN/Vlk3k0XHUwMDFkd4aXP0psnJme1XLd01+9jFk+IexFl7NcdTAwMWa20epcdTAwMTVzXHUwMDA3R6Wfh1M+P/9TXHUwMDEyXHJ/3oZjfPNiYYcvwJXPKdZcdTAwMGJcdTAwMGZlbS/crc/UbnZ8MJlcdTAwMWPPjuajz45GXHUwMDAx9pRcdTAwMDFHUUpTyd4qXHUwMDFhuWUpLI7implArZSTv/6OYzoljDSWXHUwMDA3WybvnNSYl3gyX/5HY+q391x0fV8zu/7Jb/4qXHUwMDE5pTZcdTAwMDbyQZycdrjeeWsgd8+i4HJcXFPB0c4w2599r1x1MDAwN6Ve+r2A/NJXXmiZXG6MU0xcdTAwMDTAMVx1MDAwMn7frEqeYkpaXHUwMDExcHBcdTAwMTPYW5bdioClwNtcdTAwMTNcZvf210atXHUwMDAxM2h5UCtWmkBbXHSW4GE2j+NcdTAwMWLGsUzxqcz864H7RctcXF5cdTAwMTPzL2OUanP1SVx1MDAwMK6QMuJcdTAwMTlcdTAwMWLU6GJ8dnh+3odZaFx1MDAxY8zmmUlhnM588kowxVx1MDAxZG6KXHUwMDE5XHUwMDAwP5PKasFWbZhyOlx1MDAwNXRDyVx1MDAwMHfhXHUwMDBm3Oa31r8joYSZwVx1MDAxNykticeGupOri+txN51cdTAwMWblmLkwY/tzr2qHf4rp2opcdTAwMGW6ljjDlym7RtBwrNVqN1vmXGYmqanhSGBcdTAwMWJt3TqT8ux9KrmW31x1MDAxNPRw71x1MDAxOFx1MDAwYi6JXHUwMDEz25dV7k+buXJwflhcdTAwMWHOv513Tn5ccuS+fN03kr0+mKTVKbRcdTAwMGVcX
MHsP3xphXDknFx1MDAwMe9cbnCTyzd8wzU3KeOc2JZcZlx1MDAwNrg11Vx1MDAxNlxc8L1cdTAwMDB198Q161x1MDAxNHfmUfFn+pT/yGWySk11pV/JL6qeVlx1MDAwNPWF61x1MDAxNN+PaVx1MDAxZV61b7rt6Zfq0cH7XHUwMDEyzbVcdTAwMGZ+26yP2ry6gZvAcm202T7v87hcdTAwMTR8zryPZjzlhMNqXHUwMDExq5Vl9+s8ZUpJKYRcdTAwMDJq57TZnPbBsthIvFxcO4CzintYIeRcdTAwMDVcdTAwMDM3fd3KLp6it6Ba7sDHNYrbpW12b1x1MDAxN2xqoTW3XHUwMDFiqj8/Nu/D+fKrpl9/ZdfjxunLcnZcdTAwMDecf81cXGCCXHUwMDAws1F6zdZScFx1MDAwZS6zXHUwMDAyhqVcdTAwMDLcKfBB1/8ytWqbJVx1MDAwZv89lLnF/bZcIiaPlaptzPxcYlx1MDAxY3miv1srn2x7/N1cdTAwMGWbOzu1aLc+2alcdTAwMDaZ0ZX45MqHM4ubeoHzKixwXHUwMDBmXHUwMDEx3OP5TKasXHUwMDExXGZ3vzOAnkdcdTAwMWPc39Q+oPlSXHUwMDBlMMqMMTpYXtO2SDpLfLWOwnd1XHUwMDE48GTdmtVcdTAwMWVcdTAwMTKkQ1utP6XyXHSE+5hitWcknaUzwEMx71xmtEcvmPtCLeEmYIZcdTAwMDWKXHUwMDFiXHUwMDA3xpvxXHUwMDA3g/KHKZ+djZJHvz5cdTAwMTS6Z2qfjVlnvjFKjq+3c5ptr3nO5iXVn0f5q5PTtrrozLtcdTAwMDe1dO9zXHUwMDA3yYHJpFx1MDAxOLpcIrjhY2DN6oJ2wVxcXG43XHUwMDFlUlx1MDAxYSRM2eBcclx1MDAxNY/QNlx1MDAxNcBkbLshhaJ9+1x1MDAxNf+oXHIpKOmMryR60SaZv+uPfHjKedu2vOUr8MxcdTAwMTKJfLhMVCCL19v7LHG28a1cdTAwMTVepc/DwlBMKia3Y1x1MDAwZj95rak2Llx1MDAwNXzAQUdxdf+qw1x1MDAwMtBlzOJqWSm0e+Q9XHUwMDFj71x1MDAwYlxcYzWjVTlcdTAwMWaKWyv5+1x1MDAxNYv8u+J2c6HIUi3jg0Wl2lrck+xcdTAwMTnvz22fy5vJd17YmYv8j3y5dnLFPv27K1x1MDAwM5YyTjqwX1x1MDAxYfcgWN1/ckfKXHUwMDE08Fx1MDAwZW1VXHUwMDAwnimWi2xC7zuG9Vx1MDAxZNB5o91cdTAwMDbgLvhpc193jsq6t1x1MDAxYt9cZm7Ci/KBUbXeMsu9PN7bb7R+jlx1MDAxOFx1MDAxN2eTUXNcdTAwMThcbibKyydk0pfGynP282TKfuVHKrwuqO5fKjHwqepEXHUwMDA034xGLmRcdTAwMDDEkG2fXHUwMDE4YMNRa/L9ui2Cg/26YSdnQ5F+Ucn3e6JRslx1MDAxNL5q04BcdTAwMTeunXariVx1MDAwMZ5iQiirnGBcdTAwMWF3N5GfqU5EY8RA8ye3MPmjMfWfOpGt6kTsRk82XHUwMDAwXHUwMDE0S/ucnVx1MDAxYVx1MDAxZd/B9rOWiVx1MDAxOKDDkjFcdTAwMDdsXHUwMDE0w1wiq0ZVu5TRXHUwMDA2X/FllNFWXHUwMDA3m1d8fo4yXHUwMDExzVx1MDAwMuNMsMHFfVx1MDAwYmy/4m6o2+xccr5cItvvkFx1MDAwZXyReXxkr3R0JFx1MDAwMqbd9lx1MDAxMaKjq4qoNUv6TGd/XHUwMDE0mo3+XptcdTAwMWR8/+TWUUidUiCkjDN8JfVSkYD3M1VcbtdWXHUwMDAzsDhYyOUt+z76zVx1MDAwN1ZpgJ+wr1x1MDAxMIX+jXVcdGY5WfifXHUwMDE3XHUwMDFmvMeLXHUwMDBmNteTLVx1MDAxMdn7a1x1MDAxYjXoW
9Api+c9ucLoWOr2z/Jsp381KZy3v5v6eG4+OZaltCngXHTSXHUwMDE4w0RcdTAwMTDc34JcdTAwMTgrU1x1MDAwMo17a2uDr/K0XHUwMDFiw0bv6Hji4lwi45RZWl2y3lx1MDAwZfK41/5lWLP2PTZcdTAwMWTLLnaa0/7K3mHTczs+/jb9zkvdU1HYq+9UuvGKa1ooXpzXXHUwMDA2plx1MDAxNtaLubxcdTAwMWSNOuo4br1cdTAwMWZL5lx1MDAwMVxmvMO3XHUwMDE4/Zt4no+8gFx1MDAxMEiaxWWG229cdTAwMTbQOcm3roon+e+ucPazZFx1MDAwZq7y7WL1k+NRXHUwMDA0XHUwMDAwOGuBXHUwMDA0Osms1PcoayBS4N7ha4U0uIX2XHJ3XHUwMDE3e7bnKThKa7D88oD1qPyjMfVcdTAwMWbPc1x1MDAxYs9Tq81cdTAwMGL/OHeGXiaxNY7rXHUwMDFkvv8rPFBq3P2R39nLzkvR0fkndz1cdTAwMDVcdTAwMTcpXGaUSVx1MDAwNTRZXHUwMDE5u1o8tsNcdTAwMTXu52mouMaCX/h2b0V/pVx1MDAwNVxu1oGDtfy2ybfG9msuUHhcdTAwMTHkvfT/LWn218bl5THY0/Zd02BcdTAwMDS6rURcdTAwMTUsbvNcdTAwMTWTKbuPz9Hfkj6QXHUwMDE1XvT1KzBcdTAwMTVmTdSEkWaBamvXtqolmoqfXHUwMDA1imvXitqtNmu1Wkuj83XYXHUwMDFktivLLPe/rm86f59ccpc2j/Yg2vrmd9fBXHUwMDAwNrDg9J/+z3+u3P7/oqYw6lx1MDAxZod7RXE631XN2mxcdTAwMTLFrNvYO2JRdnRzIFuyNdcynOubaFx1MDAxON2EvfQ0zLi4NYy6hb3W5ene0ejwuKDC7O60nSl0XHUwMDFh+ZPLU3HOlo+1hoNBi+3ftLOsXHUwMDFiZtLjsJKeXHUwMDE0s+VOMS7MXHUwMDBmelx1MDAxZFbspmXYK0xK2bIo9FRcdTAwMTDlv7FGZrd/eLxfXGbjqjro9Vx1MDAxNZwzXHUwMDBiM2paXHUwMDFj5uB8XHUwMDA151c7xWx9XHUwMDEyxuV5IZvuhFm8bzQpVdKikC1PipVwVsn2xUEviunaOIfnzuqVMlx1MDAxZZtcdTAwMTeP03ifuJgtdKBN0CdcdTAwMDbH1LyUScfhsWJhfH59UKlPXHUwMDBmetCPbnpehPaFvZNzfCa0m8Mz4D4hg9+m8Pypv1e5XHUwMDAz7aVn+9/XPCdbmFx1MDAxNHtVaGNcYp9cdTAwMWTVzOZE6VjNsD9hL0fP29jGbFxix6vQvvSsOPdtXGZ70VxmrpmVjtkkzNbnT1/L/LXZ0+uwUtdcdTAwMDe9tFxi52yOc1x1MDAwMmNcdTAwMTRvvr5cdTAwMGb3XHUwMDBmx2FcZp+9nK5k61x1MDAxY8YpLkD7Q5yPyunGsTmoXHUwMDE0OI3jMUvGtlx1MDAxNYZxXHUwMDA32l1n4Vx1MDAxY/qSUbxUe2ReezRm8qCC81o0he7usFGbXYOM9cK4IOqiylx1MDAwYt3g74d7u+etfKdzXG5yVqmEgp6V7XRKXHUwMDE1aHOclvU5XHUwMDAzWUpL6D/0J1x1MDAwN/JcdTAwMDL361x1MDAxNWAuXCLoUyRcdTAwMGUqOVx1MDAwNm3Cvs+gLfg5LWRgbuJcdTAwMGX0vVx1MDAwMNdcdTAwMTYkXFxcdTAwMWZcdTAwMWb0yjDfON45mGeQq15hjPct0Nx29MEx3H+O50XQ75yGccBPiXJcYuNcdTAwMDL9gvZUQlXAsa2EKLPS3yfs0Dz0UIZR3urwvVx1MDAwM/es0+/QVlx1MDAxONvC7Tzi91x1MDAwZYy9hrFcdTAwMTnDJ+At7IBcZotiXHUwMDFjjWFMJbRcdTAwMWbaW1x1MDAxNeFwOsb5KGbScG1Ol
fK5WYnmO8T7asBVXHUwMDFjxtFcdTAwMDJcdTAwMDddnKNOp4h46+33XHUwMDBlKiHcpy/heoXjXHUwMDAz8lwi4Vx1MDAxYVGcp1x1MDAwMS9qXHUwMDA2uFx1MDAxZJcqKPd16FtcdTAwMTXGsKqL+dy8mK1CP7HtXHUwMDA1XHUwMDA27ZpcdTAwMTV7yTOO2Z084Fx1MDAxOMG8gFx1MDAxZajPQpS3SjhGXHUwMDFj4lx1MDAxY0BbOcg1PlOVMlxm9Vx1MDAwM1x1MDAwM0zCc8qcZCFcdTAwMGLjWsnBtVWcXHUwMDBm1Fx1MDAwZqyUrcO89kFcdTAwMGVzXHUwMDFhfoN2d+C++JxwWqTnwblcdTAwMTl8fl3DPVx1MDAwNepcdTAwMWZoh/bzTvdcdTAwMDAs9GG8OprGOK6D3OFcXFTncD9oQ1x1MDAxYdpcdTAwMTDid1x1MDAxY3NeqpTHIchTkfpQZvAp8N54XHUwMDBlYFx1MDAwMc5FTOdcdTAwMTRcdTAwMWOP6VlzXHUwMDA241PWJD9cdTAwMTVcdTAwMTgrmN9cdTAwMTKMj59fvHdcdTAwMGVxwYteZuBcdTAwMTPbmkZcdTAwMWSGmJr5PpTnMFx1MDAxN/A3zn2nXHUwMDAzz4d2hKg/aFx1MDAwZVBcdTAwMTZB9qdF0Icg4zBcdTAwMDY5XHUwMDE4c+pcdTAwMTc+XHUwMDEz51KgfJSyOZSfXHUwMDE5zFNcdTAwMDd+h+8h3mtcbnpcdTAwMDCPM5RcdDjPz1x1MDAxYvZcdTAwMTG/z+GZcYjHRbGC8lx1MDAwYmNTQbmEZ2X70Fx1MDAwZpDXuFxmc17F+Vx1MDAwMXnIQfv7unSchjHuoMzPcM79cZSBNLS3gDZcdTAwMDA+Q1xy/UJZiOF8QTIxx+/9uESyV45L+SliXHUwMDE0xlx1MDAxM8ZcbsdVgE7qVfHZoItQTiPoXHUwMDFii0mXZKNOXHRtXHUwMDAxYr3XR/lBfYU6SCf6PWk7jFx1MDAxYugjkENcdTAwMWN7XHUwMDE4V3xmWvmx7mg/XHUwMDE3oYDvXCKxXHUwMDA3wuOJIb7m/nx4dsX3XHUwMDE1+lx1MDAxMGOfQG5cdTAwMDTp5lxmtjVEOYRPXHUwMDE4n1xmynRcdTAwMWZkXHUwMDAxx9brIcDunHRcdTAwMDDqylxufaKenaPsXHUwMDE18ZpejmRcbu1cdTAwMWJcXFx1MDAwYnZcdTAwMTAxnUObXHUwMDA181x1MDAwNVx1MDAxOMX57uFcdTAwMWNAXHUwMDFiXHUwMDEwa1x1MDAxNVx1MDAxY5OyOridd7Bcclx1MDAwN6RLSGdMS6jL8bkgXHUwMDBm8FxckNG0x1x1MDAxMGIwXHUwMDA2eUBbXG5jlYxNjPNcdTAwMGKyr/y5XHUwMDA11G94Llx1MDAwN3kjXHUwMDFkVcrSnE793GBbQtSJM5JcdTAwMWbAXGL9Pb/FXHUwMDE22dhpqdLBv+cog6hDUG5cdTAwMTO9kMhcdTAwMTLqxFx1MDAxY1x1MDAxYy9cYj/GYJdAXHUwMDBm0DPnKK+od1x1MDAxMVM4P6BXaP5CL4NwXHLhNKY2x36sPJZg/lx1MDAxMO+gj1x0S9h/iZhccml+USeivaqOob1cdTAwMWPtNOhhbFx1MDAwMy96mdRFkknsP8nk1MtuX5RqU7pcdTAwMWX1XGL8XHUwMDBlOjCcXHUwMDE1vZ5E/cdcdTAwMTBfRcJFXHUwMDBl2+jl/lx1MDAxOOQsk3CHXG7J2SxcdTAwMTRT1C2AL8BcdTAwMWLNx0lcdTAwMGbuz0uEjbJM5Fx1MDAxM+6N53dIXHUwMDE3wHVcdTAwMWTQxXOvhzqIXHUwMDE3iTJ7XHUwMDAw81ick66Y4fiEOFx1MDAwN6jre6if+rfyhXxcdTAwMDXGXHUwMDFj7ot6PJvgu
Vx1MDAwMv0jOSjjXFzMbvUwtEvT3CC/QlmNc2SvQE7GMK+K5GeOXHUwMDE4i1A3ymKlj22bXHUwMDE2UeZQXsnuRTg38Jw68Vx0uFx1MDAxZWQnQmxJmu8u2VlNbVx1MDAwN1vs5YNs6Twkm1x1MDAxM42pv3S/+tjPXHUwMDA3/E1zReNcdTAwMTN7OVxu5yTbXHUwMDE5/DvtbbOXtTnyXHUwMDEzb6tJjrFtoFx1MDAwYkPUhThcdTAwMWZ+XHUwMDBlvG5UYKepLyHNNehXwnLEvCxEwIFcdTAwMTDTXHUwMDFk1Fx1MDAxZCzhYzjWwD9IVlH/a5Rf1IvAR1x1MDAxMOPwvVx1MDAwZudBu+F5yCGg7chDvC3IVtF+oS6l62ksUSeTXGaGKOsxtqOJ8lx1MDAxOaOeOFx1MDAwNS6E+iFM5Fx1MDAxZDjrnOyAXHUwMDBlUX+T3SwgnqelZI5Ql4fZ1jngXHUwMDE324Y4UPBcdTAwMWLwI8/bXHUwMDEyvcpIhir1uI7nz9Nkn7zerlx1MDAwM8fC/oJcZtJ8IcZQJuuJTkS7jGNX7ZBNJzktoC2DOS5cdTAwMDPvxWuJz8bE8alvKIvII8FcdTAwMDaBflx1MDAwMnue6Fx1MDAxNZxP6FOGbC7OXHQjfND3xI5cdTAwMDFnKSHeujRcdTAwMDfcj1x1MDAwM+JcdTAwMTPOjVx1MDAwYlx1MDAxZNI5Wfqck1x1MDAwZYH5XHLv5rdAOqVIbVx1MDAwMZ1/jNhGXHUwMDFiQHKlSS5oPjrSy2phjnomhL57/uj1czFcdTAwMWIp5HclwIjnJjifJOOApVtcdTAwMTlHXGb5+YWxUVx1MDAxZUNcdTAwMDXiXHUwMDE5IOcz4lx1MDAxOajnSd+HdFx1MDAxYzA5pr5cdTAwMTJ3ySFnl8RcdTAwMTOO8bODtlxi5UigXHUwMDFjw1hzlFx1MDAxN1x1MDAxYVx1MDAwYpQr5Hoov4DtUpae5/XVMfB+1Fx1MDAwN13UV7dcdTAwMWMl0og3b1x1MDAxYqtyYZNIpzLAXHUwMDBi9/q8XHUwMDFlez1cdTAwMTeBrUYsXHUwMDE0vC5BXHUwMDFijM/xflx1MDAxOPS3zFEnoVxml7J98PmQ3/ahXHLEU1x1MDAxMIOk/+BcdTAwMTnYTuH9XHUwMDExlFx1MDAxM+Bf0DawXHUwMDExKDuAd1x1MDAxY5tOJ+HlXr+SPiyMSVx1MDAxNjzu5lx012Oeq+ZQX8WJ7OA4TUskg7k54q6I894jLoP2XGZ1XHTpPX+vsvbnRtB2utfcyyl9R2yTz1Yku+tli/hWXHUwMDBmbWVcdTAwMDF1JMo/6oeY5tzrXHUwMDEwxJOkfs+xT/id/Dfl7SF+r1x1MDAwYtLtZMPraM+l1ydo51x1MDAwYl6+vSyrXCL5XHUwMDA3UYJRPz63ttjzTtTX1IdEj8NcdTAwMThcdTAwMTAvRd6JXFwlisOFzVx1MDAxZic2n3gu9Fxy53pOfJo4IdraiHww4r7elqI+XHUwMDA1XHUwMDBlXG78XHUwMDFmeUaMtjpCXHUwMDFiNKMxhe+h9229XGb2XHUwMDEybpON8Fx1MDAxOYpsaVx1MDAwNmVcYrlcdTAwMTD5XHUwMDFkxFx1MDAwZrw8hJ5X98imoD5Fvlxieoc4XHUwMDE0S2w12jTPxfJTlHuNtlx1MDAwMLlsWDnv+etcdTAwMGLkP4Isc+9ToXyW8V6JX5BG3S3QliCfKJLu74/RPsJcXDLvXHUwMDEzdsZFiilAW4BjXHUwMDE1aVx1MDAxY+skZ15WqzOPhVAmXFxPeiz1XHUwMDAxk8TjUVx1MDAwZVx1MDAxOeHV21+yLWGPeF+MeIFj3NvQ6ti3JYeyXHUwMDA1tjOHbWKJTke9jzxagvzOsZ/4XFyKW+D5xLVcbmQnUF+hfvfYRO5FemxG+mGOslx1MDAwN
rq1Up7D/ImS14Vzz59cIpT7OLFhc/LliEugTOJcdTAwMWNUkVx1MDAwZuPcxV6GXG7Mn4/nMWxcdTAwMTPHMfe8XHUwMDE0/NAuzmXZ+9pxXHUwMDFmx1xynot8PEf8MuFcdTAwMGYxylFcdHVM3J+Rv1x1MDAxNVeJe8KcxIm/1VnytzT5xnOU21x1MDAwZT1cdTAwMDPGqUOcv0KyKDwvTSd6mfQzcViMXHUwMDE3wDNj8sczaW+jkbckPqPngqRPiUd5n73Ofbv73oZcdTAwMTB/70jvf6FcdTAwMWZx6/uivq5r8CNikkGQL9RhwDPhWSibwCkphkE4mPv4XHUwMDA0zFx1MDAwN+nWqjhIOFx1MDAxYeKB4jlzXHUwMDFj77IkXHUwMDFlXHUwMDEz3/q+4TyxXHUwMDBmzPuj4E9cdTAwMTEmXHUwMDEzvzTGOEaZ/Fx1MDAxOeo3clx1MDAwM1x1MDAxOFx1MDAwZppcdTAwMGLkIT30h2hcZlx1MDAxMDeJLkV/k/DHMU5cdTAwMDZ+hPb3qGs/XHUwMDA3KO84N2BcdTAwMDO7xGugL6h3yszbsEhcdTAwMTCP6pFcXKuEy8dcdTAwMWWr5MNcIveaka9biW59eOXbQGPBbv1v0u8/LsdePyC+YY7IroaqSVx1MDAxOFwi3S79XFyj/1x1MDAxMCZy3eqRvlx1MDAwMjuOnFx1MDAwM/1cdTAwMGaan0za6zfyd1xut37K3GOX5lx1MDAxODnV1PPeOskh2qLiPJE1xFx1MDAxMs6x12OJXHUwMDFmhbarj/5cdTAwMTLz/OOoRz5WjFhGXpdGPs2S2FpMelxiY5xdj1xyjKFcdTAwMTUrxXPy/z1nQlx1MDAxZinxRTHmlUNM3z5flLLnplnB+Fx1MDAxZMVcdTAwMDWmdXxOr4Dyi89ATiwoVoNcXMb7+DBvXHUwMDFkmfAwhtgu1kA/Ylx1MDAxY1xy43qJvfW+KflawO1cYtvM66806tlcdTAwMTnhq1f2vj/FVVwiill6fEa8TjyiM010i0J/O4nvzJO4XHUwMDBi8jzZpGfQ2E5DeVlcdTAwMDTdOqd+XHUwMDAzly5RXHUwMDFjdP9cdTAwMWFtJ84x8ONZ4kP5a3B+hyHpUG+Xq/g89LNv53HubVxcXHUwMDEyt0NcdTAwMGVXy1F7Skl8XHUwMDAyeEdcdTAwMTJLzKGugPEk3kgxUO83+3hcdTAwMGXI922MXHJtMsalYs/xWVxcRF9cdTAwMDPtPY5dXHUwMDBmbFx1MDAxNNqBSlx1MDAxNPtzqb3a245cIsxVgZdIXHUwMDE3I0aKPfhcdTAwMWJwVfaxPtD30G5GcVx1MDAwMuw/6e4wsdtcdTAwMTifwNgp+ajIQ6ehj0XyYpbicWBcdTAwMGZgjmPiXHSSfNNcbsZUypquwTmOy6RbXHUwMDEz3Cd+OPqUIfVcdTAwMWT9I+JcIj7WXHUwMDA1ur+P9oajnSl5zkE8XHUwMDExr/fzXHUwMDFi+dh7XHUwMDE3/e26n1+KV/RcdMfoXHUwMDFi+vlccmc+7lDv+LhcdTAwMTPiukpcXCvBM6OYIXJ2f/+4gbpcdTAwMWT5/3BKfjLp+Irn1GRcdTAwMTMpJl5IOElhdouVMJO0XHUwMDEz+1lBebyLsXGvT1xuXv7kJYxHNZErxGxVwDjMaFxcSO/1NfG3TOK/U6xkcJ3EXG5cdTAwMTL+XHUwMDFiXHSvw7yMky9OMTqMV1NcdTAwMWPD27dcdTAwMTi5L8pcdTAwMDPmK0hXT/09SVehjlx1MDAwMDtR0NRcdTAwMDbARaL7XHR7XrfgfdH+58jWUL6C/HG0VdHM+1x1MDAxMMij6rd2aoJ6XHUwMDE1Y3JkY1x1MDAxMq5V8j6RXHUwMDBlvS9cdTAwMGWy9c00ca57UVx1MDAxMju6xVNndudcdTAwMGZcdTAwMTEnw1x1MDAxO
CTqIPTlXHRXMlx1MDAxOesp9Y3i1tg+sqGM4ntcdTAwMTWy29L77P24KKZcdTAwMTPiWj20PdhXiiN4XHUwMDFlmlx1MDAwZok7XHUwMDAxXHUwMDBllI/LVtHGsZK3eYx8XFzgkcVeXHUwMDFhbVx1MDAxN/Q78r6AmKKdTeK8XHUwMDFk0vWkhytVUUd7XHUwMDA2NpXmh+YzmpKtxDg62bJEx4H8XHUwMDE1Kd5cdTAwMDH8XHUwMDE4/I9SXHUwMDA1fV7sS535vmLMkHxexFx1MDAxYfFsXHUwMDFhu3xIMpPE5uMkr5TgLvJjXHUwMDEwo79cbp81L7dNikFSzE8g10NdVPScXHUwMDE578NJj1Tqt3pcdTAwMTLl3vvimcSGVTDPUFx1MDAxNj5WX0b7wcmH9vF4jrGLXHUwMDEy+Vx1MDAxN+GY7lWbxkXyRcqkk1x1MDAxMHdccorbXHUwMDE3qY/kXHUwMDFi335cdTAwMTJ2XHUwMDExK2S7hMfIqFvYK7J2bTY4PN5npz/O2UFtMDnNn0xa2dG0JI9cdTAwMDbtvfK4XptdnlxuZVwieXRcdTAwMWVdlG0zXHUwMDBm51x1MDAxY/PRaW1w0dgrm+bQzZu1b9e357f29s+bXHUwMDE3xWFT7o9LQ33TXHUwMDFjVm19OLupi+txtLd/cyr3XHUwMDA3kSxeNuGerXzBwrXzhjiZ356LbWhI8HB+7Fx1MDAwZVx1MDAwZWqnN82L8jiSu4O6XHUwMDE4XGZcdTAwMWK14nkrP7hp9i4r9ZqOMVx1MDAxZtRcdTAwMTSaXHUwMDFk/Ghcclx1MDAxYbXWqJVlXeDoUzjea4rZTdRj3UKMOSzMb1x1MDAxNDqnw8F1XHUwMDEzzimI0yH91y10oF/zplx1MDAxOFx1MDAwZjCnlFx1MDAxY1x1MDAxZML1MfTjpiGq45ZcdTAwMTj0W/mOK5D/Weg05ekgXHUwMDFhnl43ZeRcblx1MDAxN6eXkVx1MDAxOHSb+Wq3kPdtPVx1MDAxZH5cdTAwMWI3ajNccmOatDH4+6HPm/59qVx1MDAwMuSqvVxc+SxcdTAwMDOjjV1a0olL/Y/a46tu++bhWctlXHUwMDFk25d/vSTn/PzasvfIOc+Lq/lm+n4v1zwtkF6D/93lXGb3JyArg6W5dYVhkUdcdTAwMTf7g+bFUZbkZUlcdTAwMWVApu6etyxcdTAwMGYk/yCHXHUwMDExPCdcdTAwMWE63lx1MDAxY5ZcdEfw91x1MDAxNeDiulHTg8bQXTZ7S7/ni9f1XHUwMDFmxVx1MDAxONpwWVx1MDAxN25SyFx1MDAwZkA+1biV/1x1MDAwNrJzMvffdXzw4+hcdTAwMWOwMoi6/Kb144judf/cg1x1MDAxYfQxU7jLeS7k965/Pn9cdTAwMGVjXHUwMDAwdpa1wE87zu6ah+NwP2+aXHUwMDAzPV+89jzw5Fx1MDAxYa7XYN9cdTAwMTXpPuBcdTAwMTdwnOJQTeBTXHUwMDA3YCtvv6+/T1x1MDAwZfRhp0O+xHHh5rA3m9Z/XHUwMDFjjVxu+bIr9DHuXHUwMDAw9mU4hTnqd1x1MDAwZuJ7eX/ibvtgM5evn95E8vTisPPf//1cYoao+iV4XG5D/qxVXGadRUpcdTAwMDSGXHUwMDE5rqKmldo2jYm4M6rRYFx1MDAwMZfwpVx1MDAxNVxi44KXYWjbm/9cdTAwMDdDW2LoulUrXp7mq53m3kmvUTtcdTAwMDI7Ubxp/tjlLcJcdTAwMDfhZe1vVGMyPOm1MrtcdTAwMTJ+Y1xy7GdcdTAwMDXvfX5nP0owXHUwMDBl2H/U9e1cbvyO/blcdTAwMDD93yt4Wb1/fS+99nq8jq7H++/tKz9cdTAwMDb3sYKxivNcdTAwMTGcu1x1MDAxNiOlfEdsZz+ccLh25lx02U/OWpH9rd9cdTAwMTP+XHUwMDEy2X/+S
8j/irKPOYNcdTAwMDfyXVx1MDAxN9+mcM95U55MTjNL11x1MDAwMd9qXHUwMDBlT341xWCy/HtdzM4jXHUwMDE5wjO9fDdQrruc1Wv716c/XG7++0W4ZCdcdTAwMWPIPN6rev/cMdzrplUrd+9k7rZtIK/A54DfXHUwMDE1XHUwMDEy+Vx1MDAwNN8535lcdTAwMTf3wMfOdabt7oJDJefdl1tcdTAwMWarmKPshlQnQTFrbyvIXHUwMDA3pFx1MDAxOEI2LVx1MDAwYndxUfqNlcBfplx1MDAxYTHK2VSTXFx11duALOZaylRcdTAwMGLkrysr8GXAp+jHXHUwMDA1iiNjXHUwMDFljvxcdTAwMDCM+2CccO7rrFwiXqA8XHUwMDAwjFx1MDAxY/p/WIfg85hJzFx1MDAwZbg5+rBcdTAwMTWsIUB/XHUwMDEx853feiHlbDtcdTAwMWRcdTAwMWZ/Rlx1MDAxZlx1MDAxMWud0lx1MDAxNIctUtxcdTAwMTdzZj5OXHUwMDA2nFuXanjvPsaspvTcbN3n8ny+miWxXHUwMDE0uOcp9lx1MDAxMc/FXHUwMDFj3ZhipsjP6TdcdTAwMWNcdTAwMWK6lvt7dGTD53bAT+iQ7jmQIPex2mxcdTAwMDOVsVx1MDAxNizLXHUwMDEzeuD2rFx1MDAxNT2w9TvKX6JcdTAwMDee/1x1MDAwMvS301x1MDAwM1x1MDAwYjuCdVx1MDAxYtNCXHUwMDFlY8DpTvNcdTAwMDfm2Vx1MDAxNvJdlydz0PWTVlx1MDAwZbCTd2i/RKN2XCLLQ4e6fEpxpUqacpuh11x1MDAxM0s2JX1fx/hz7tU4XHUwMDE2iSvtZkGOMT/IS8ekd2RcdGNcdTAwMDS9eqdIeYvCnPBEOUjy/2e+RuA0XHUwMDA0uUVZRFx1MDAxOVx1MDAxNcCZwjCubvpcdTAwMWWH8X5cYvdDuY/D/pRRvJfut+Y7xU2PsiCDmP9cdTAwMTL+eopTYn1cdTAwMGajPGq23/H3z2HNXHUwMDAxL1G8XHUwMDE4Y6yhXG4ppnNcdTAwMDKIxrq5qsZcXLGPgWFusM7vfN8sxkrqMeBGXHUwMDE0KeeL11dnYT9cXP4ufDwy6izdT/jcJtaKVNVt7Vx1MDAwMeAspvhFNeRF8t3L0J+6pPhNr9BZ6o8oYa6W4jZlXHUwMDFms6/04fq+j4mvjEdu6uOfXHUwMDAzXHUwMDE4XHUwMDBmrKHqUC1MksfAe1Jdkj9W7vg5xb9h/OI+S/Lr9FtcIi+yRDH26PY7h7nKJn/PsV6JcpN316aTe+42gFvPbttcbv/zseh+yKj/lVx1MDAxMGspcOxZ6HVuXFyiXHUwMDFjSzq+vUc5hs84VFx1MDAxNPdcdTAwMDB9WcScZ1x1MDAxNvPa/Vx1MDAxOK7DuCTmU5J+9zXVWVSnS8fqXHUwMDFhZGX26HdsS/ZcYmQzdyeLJcpXlFklu6b9g8uib1OB5LRcdTAwMTjvw7iVZ5RXmuN3zOFhnWTEKGaHcas4ojpcdTAwMDc8VvIxbTzGKK7Rn2qqI/W1LVxuZVx1MDAwN8dcdTAwMTd0/ZxiYCCLJawz6GG8q88pLoy5g2yf6tBAXHUwMDE2dZFi6T6+6mOjp3g95t8wXHUwMDE3zMF+oeyA7PVcdTAwMDXVUFJsk+pcIjCnoCmWRXKCNYeo/1x1MDAwN3A9yjrFmzCWg/Hg2+thvHJ4PdWMwlx1MDAxY9D1Po9cdTAwMGXXU15ccttfpzgoXHUwMDFkS2Lu/liBai9CrEnIYrywL3xssoD31JQ3yYVwT5SFiOLsVOtcYnjx11O+m1MskOSMrr/FIV1cdTAwMDdj46+jXHUwMDFjTye5jnKenHI0Mdqs5efmvG1Prlx1MDAwZqnWtkp1iyDfnaW+YNx+hjWMfnwwPoX37FDstlx1MDAxYy+PXHUwMDBm5oU6XHUwMDE4s/XjS
zG86ozmXHUwMDA3dKW/nvLenHJ6lXCWXFyfzFx1MDAwZsXrUcfS/GL+XGaun/t8Pl6P85vz1/fKJK9w/UI+Kmjj61hHXHUwMDAw12NcdTAwMWVcdTAwMDRrgKtcdTAwMTiDi+FcXLi+LJK6RtA9fv4rS/JcdLrJ3z9cdTAwMTdKrFx1MDAxNVx1MDAwZimuinlcdTAwMWTMYYLuwrpVkFmKXHUwMDFmYp1wjHH+aEY1RKTbQp9rxeuxJq9C1+P4Ue1cdTAwMTTNw1x1MDAxY/Rw5VZ/YFxyYkTtKVx1MDAxMeeqzyuAdVx1MDAxZsuukj6A9mHuT3o9Rlx1MDAxYyam9vl7MVx1MDAxY1x1MDAwN9BTM4zlUs1QjDjxtcTLbfP2q4y6SFJcdTAwMWM/xrahfCD/q2NbbmtqXHUwMDAwz5RnxOvjkq+5RezOa322pHPLSe7uKPTturVpOcqXlsHuUO1WXHUwMDE3jlEtXHUwMDBl1vpV5Z1dIfmiulwieP6KXVn6Xlx1MDAxNlQ3VqGxiH28t4wyKr0+uHsuo5rMuMxqXHUwMDBm/TSJ+lxixiQm2YOxw7nHscZcdTAwMWFcdTAwMTjP68JE56Y1jSnVpYbU/yRcdTAwMWafjGmBl0Gv+dw/2n7KtVx1MDAwYspcdTAwMWbGpK84jVx1MDAwYuXtcVxyRaiT+lx1MDAxNubrS2jdXHUwMDAz1rJ4W+bzvGhcdTAwMGJcdTAwMDFzXHUwMDFkrFx1MDAwM+JcdN4kymOJ5Dmnvf3BfFx1MDAwZXKDfuzj57lcdTAwMDTDNKea6suymFx1MDAxM67i86mmXHUwMDE3c1x1MDAwMyWqXHUwMDA3rFx1MDAwMiYwXHUwMDFmTvec+jxcdTAwMDfqo4hyaoiBO1x1MDAxOc5SrY5cbqs4Tph7Q9micVx1MDAwMp1cclx1MDAxOI3LpEOxjivMUr1cbtbo43pcdTAwMDL/XHUwMDFkMLVmXHUwMDBlNNVmXHUwMDFj+2fiOFx1MDAwMG7nlFx1MDAwYqacXHUwMDE05igwr5rWvs6OcI/yXGacXHUwMDA2dVHhtm4taTPhXHUwMDE2ZVNT7L2XXHUwMDEziZ3181CpYl3XjHCNuIkxp0L+h/D5N1x1MDAxY2eaW59cdTAwMTPrXHUwMDE1KI9TquL1XHUwMDFkvFx1MDAxZXM62D7khFx1MDAxZG/rXHUwMDAwx3Q91lx1MDAxYdC6XHUwMDA0qtnHfEeyniWmnFx1MDAxZOZcdTAwMDL9vJJu9vlAXHUwMDFjw+qMcj9cdTAwMTXUzagnOlhLRTpcdTAwMWR8qNt5XHUwMDA1O9RPbEJZlFAuXHUwMDEwb1msXHKk8Vx1MDAxMKhcdTAwMDdKlL8px35cdTAwMGXrlDuBY8i7krFcdTAwMDSZO1x1MDAxOW3nj0hnnLXSPlx1MDAxNZfwZ634I1u/aeMl/sjzX+PxV4xLvHFMrnXdXHUwMDE0+4OH8Th/XHUwMDFjfaG6XHUwMDE4zOtiNoD7KPKRoJ112XdcdTAwMDXkXHUwMDA23d05XHUwMDFky6yNjWGOXHUwMDFha1x1MDAwMqhWlNbroD/mx2fq8+xcdTAwMGZ/wzrMhzE4ijePMI63pUxLpezSetBccjLtz7pbhPivv/3r/1x1MDAwM3HOjYoifQ== + + + + + Your ClusterLapdev-Kube-ManagerLapdev EnvironmentLapdev EnvironmentLapdev EnvironmentLapdev Sidecar ProxyApp Workload PodApp ContainerDevboxDevbox intercept traffic from clusterLapdev EnvironmentLapdevDevbox client traffic to clusterDevbox client traffic to clusterApp WorkloadPreview 
URLPreview URL traffic to clusterPreview URL traffic to clusterApp WorkloadDevbox intercept traffic from clusterApp Workload \ No newline at end of file diff --git a/docs/.gitbook/assets/file.excalidraw (3).svg b/docs/.gitbook/assets/file.excalidraw (3).svg new file mode 100644 index 0000000..7a20d0b --- /dev/null +++ b/docs/.gitbook/assets/file.excalidraw (3).svg @@ -0,0 +1,8 @@ + + + eyJ2ZXJzaW9uIjoiMSIsImVuY29kaW5nIjoiYnN0cmluZyIsImNvbXByZXNzZWQiOnRydWUsImVuY29kZWQiOiJ4nO1dWXPi2JJ+71/hqPswXHUwMDBm06jPvvTEfVx1MDAwMIxZjbHBYDwx4Vx1MDAxMCBA7Gaz4Ub/98mDXUYsYrHBhbvtiqoykpCOpPNl5peZJ/M/v52d/Vx1MDAxOE56zo8/z344z2W75Vb69tOP3832sdNcdTAwMWa43Vx1MDAwZexcIrPPg+6oX55cdTAwMWRZXHUwMDFmXHUwMDBle4M///jD7vWsmjssdbtNq9xtv3zNaTltpzNcdTAwMWPAgf9cdTAwMGKfz87+M/vXcyG3bdec2cGzzfPrUCmXt6a7ndk1MSaKcaQ1ezvCXHUwMDFknMO1hk5cdTAwMDV2V+3WwJnvMZt+JItcdTAwMTPe7lx0XkjE4plqtlx1MDAxMlx1MDAwZVxiqeeXrbqtVnY4ac2GNOjCnc/3XHKG/W7TKbiVYf3n7Xu2+32r31x1MDAxZNXqXHUwMDFkZ2BuXHUwMDFkv23t9uyyO5yYbVxivW21O7XZOeZbns1cdTAwMDOggllSM6W4ZkyR+c2a71x1MDAwNzAnXHUwMDE24ohcdTAwMTFMpZBcdTAwMTJ5XHUwMDFl18vIwt1Wt29GNuzbnUHP7sOLmI+vZJebNVx1MDAxOGSnsvm4p5/3Pb983XFr9eHitoEze/SYYKaxIHK+x1xcpFx1MDAxN6/Mp8Bsa6aSl2rKa5GsvlwiXHKWeGjV9PTtqnBA5V46aTdcdTAwMWKapsdPXHUwMDAx0miz3OOT9Fx1MDAxZVCIxc9DoWcyLvSr7rB23blRrdGP1/3/N3+zfbvtxM3YOqNWy/t6OpXX17Owo2R2RDzTdn6qUa9iv8wwLFx1MDAwNWJcdTAwMWMrXHUwMDBlj52/7W+5neby6VrdcnPNpFx1MDAxY1xm7eHInP5Hz+lU3E5tYSq+jPdcdTAwMDd2SpQzeKRKc1wiS1x1MDAwNFx0wspEl7TGpCwwRqqMheTSM1VcdTAwMDG0zsKDxvMnhn8+m9n/f/2+XHUwMDFlj0PnebhcdTAwMGWOhPvCkVOOtVZ8dzRW25nLavM2cu1e3dfiLVx1MDAxNFf6snfqaMTMwlpcdTAwMTJcdTAwMDTvglxiuVx1MDAwNEYsLU5cdTAwMTiVmFx1MDAwYk2V4MtcdTAwMDN7XHUwMDAz2b+wY/68XHUwMDFmiJhwi1xiKiRWnrk3xyRdwSSjXFxcdTAwMTMhXHUwMDE42YLJL42oarczzLpT8+JcYlrYemG33dZkYbLMJjk86GCvd1bo9putru2ZLGZvsOXWzLT/0XKqi3hcdTAwMTi6XHUwMDAwsrfdw65n3pbharbbcfqrT6fbd2tux27lNl5cdTAwMTnu14n9fJPYwtwzS1x1MDAwNo7Za7brjVx1MDAwMO475eHLXHUwMDE0XoNiTsjy1p8oJogwTVx1MDAxOabzp7dccsZcdTAwMTfDy0Ag6pZ7XHUwMDA0lYuT26brkuHos2A8XHUwMDFm5l4wJlxmWVhQzUG7UobmV37BsVR
cdTAwMTZBVCgmOONcbit1NFx1MDAxY1x1MDAxM1x1MDAwNVx1MDAwM1HUPHaiuGBrsFxmXCLeXHUwMDAyoMNwuKSgXHUwMDA2sFjGtqaEaDCHPlx1MDAwZtr/mZ/0dcbR1y1/+SP+7Tvzb79NXCJE75BcdTAwMTTRrLioT+g0mku1XHUwMDExXHR5XHUwMDE0vWdq2/1+9+nH256/Vsa/Kk9cdTAwMDQmlHnxtUWevFx1MDAxZlpcdTAwMDIrP2hhxYyJRvawV+8mbm7E0+GLXHUwMDEyqvdtfp1cdTAwMWLZpfjJQ0tblIG2YaBcIlx1MDAxOV+EXHUwMDE2kTCXXHRjkihCKVwiUnxcdTAwMDRZ/6rOfo6EKqBcdTAwMTaYK+p5oetR9dDLlGuDq+6AhcLdfDTauGv3h6eBqlBcdNf0yIlcdTAwMGbjd5e9WPx5Oqlcbnk4VCnkkYxcdTAwMWZElS9cdTAwMDNkwtfk1Fx1MDAxOFx1MDAxOWWld9dVVylcdTAwMTGhvandu0km71x1MDAxZVx1MDAxM53idadwfuomp6SWlpzA8yZKSbaEKC0sJiVMZIwwxmB1fpRcdTAwMDD6o2o38lx1MDAwN6Ypw6BW0Vx1MDAxNtxcdTAwMTREeFx1MDAxMJJXJZqYPlxyrq5cdTAwMDJcdTAwMTk6yEW93K7IgrF0MKCeXFxdXHUwMDEy43at1HdlznvA4ZD3Tf7OtpM/KrVcdTAwMWZcdTAwMTLpTFZ6XGaQbUDU7vVznTV1aXhzX+hMJs2nJ1k6cSBcdTAwMDL6LFx0XG5eXHUwMDBippXWSCxcdTAwMDBcdTAwMTGooYVcdTAwMTCVimuk4IBcdTAwMGZxP39cZmJuSYmZXHUwMDAylrlcdTAwMWLvXHUwMDEzUlBseOKnqbEvw/tabtn5r8FZXGJcdTAwMDRjuf6rKOC2QVx1MDAxY51cclxuTpe3vpmsMJ1cdTAwMTVcdTAwMDMqtLuGvVbV24dHVCy4uVKh/lAodVx1MDAxZmrlUzdZubZcdTAwMTRcdTAwMDXRXG6qiyCFXHUwMDE3faxScotcdTAwMDBcdTAwMTMkmDGBxCmbrMauXHUwMDA2XHUwMDE1sY1cdTAwMDeWQ/r6uflcdTAwMWO7cVx1MDAxMyrUrDxG7lG+d1x1MDAxYVx1MDAxNmtcdTAwMTlcdTAwMDdcdTAwMWXoQIupk7+8LT+VnmvBQvhgXHUwMDE2q8RaXHUwMDFl32LlyNe9ojWTYLHuricjgXrxJjaM8e5cdTAwMDNcdTAwMWI9JqvJULnLT1xcT1LFLLDasaScU44oX4CTQtiiXHUwMDFh3lx1MDAwM0xWquSvt1cpkVx1MDAxODO5zVxcPa+GnrtP/btWPaJcdTAwMWHNXHUwMDE2iXJZaXut0ekli4T7JTWJZOrXV1x1MDAwZuI5MMzeeFx1MDAwZjhcdTAwMWPsvs3VXHUwMDFkzFVcdTAwMTiHr17TVCl4XHUwMDEwu3tiXCJu6LZRdMSt21x1MDAxZmV0YPQ8rSh04jgkWluEg1xup4SAdltEIeWWJlxcUcYpJoL6gfBjOlxyKTCJpVx1MDAwMrW6i6nKOVx1MDAxY8s+U319XHUwMDE1SzXULf1qO3XLXHUwMDEwjm6lYq+vaSVoQYWAXHUwMDE5Lnc3U9PjJ1x1MDAxMFW39WAvqKJ5p8fzXHUwMDA1enviZiqwSlx1MDAwYlx1MDAxNJVcdTAwMDC4YiSRmlx1MDAwM2hcdTAwMTa1oFxmXHUwMDE5T5CYuYqAo+KlkVx1MDAxZC5owWAgcFx1MDAxZCVcdTAwMDUjxrU+fzFv6GZIWCD9hcAgYcCqns+HV7BcdTAwMGJKkdJoq9qN8F7yMlx1MDAxYdDlW5vYsUkjI3v3ydOwVY9cdTAwMWOz4FRcdTAwMWbMVnVaLbc3WFx1MDAwYiytfdWkXHUwMDExyNgknexcZqv6OH9
dfVx1MDAxZVx1MDAxNvrRSHzUian+eMq6J1x1MDAwZStQg1x1MDAxNkaawDxVaCVcdTAwMTZonD5cdTAwMWEwXHUwMDA101xcclx1MDAwMfzKN8Hm47FATSxEXHUwMDAwU2AuUcnW+neUWDhk1ZqVUnHQoVx1MDAxZbP683FFtuPqkLp148z3ZWlcdTAwMTh5JOSKfahcdTAwMDRcdTAwMTbwutXOM//88mGcT1RcdTAwMDO97m2k7Vxc1d1mzv20mf8+XHUwMDAzkSGuLEkwwlx1MDAwMlx1MDAxZbyUUixNfaotjo03U1xihYHM+bo0PzO3XGYoJZNU0W3+TDdcdTAwMTZpNKv3vZDM3YtuXtVv407DS8fy133avM92xldcdTAwMGZaZmr9+FMwmPJcdTAwMWVwOIh887Wz7XxNU38/pGaUXHRcZtbGzngss0BOP1x1MDAwZW9cYn6spVL1biBQKTycNlx1MDAxZamWzJJUXHUwMDEyUP1gzaLl9DJibCpFJSgjk0lwxKxcdTAwMTSsuSVcdTAwMDTYcDtcdTAwMTI3YaikpNvSPT9cdTAwMGJQm4y2Ok1cdTAwMTJZRm5gUIteJHmYpzRu7ma0/b7pvLe5ZFx1MDAwNTc7VT68XGK6w+ywfz5cdTAwMTlGXHUwMDBlcN5Sml1FnWyn9lhcdTAwMGWRi3Y1XHUwMDExdm/0wYzMz6WxKbtXccZnWbfilO3+WabffZ58LovdPIJDkFhf6cZ8pVx1MDAxYqVcdTAwMTRzsbto07a8XHUwMDAyWT29v8+qXHUwMDFlLtdcdTAwMWXwQ7N56qJccsxoxjAlJnCKPMHI2fe5hUDWgNFFtUBY0+PZ2GDqWDvLNUxcdTAwMTFcdTAwMDWtpD+Po35cdTAwMTWHlDdx9SzT/YVps4tXP1x1MDAwNII3MFx1MDAwNrXBo4xcdTAwMTihhNHdXcrP+PYpl0q0rnP1J+nag6xqXGZcbqdccmNAXHUwMDBlsrimyoCUXHUwMDAw01xcXGbtXHUwMDA0tDbJXHRcdTAwMThMSiChVHpcIj+/jjBw4PZcdTAwMTJLvo0vlCuNfCDci1x1MDAwZd1IduJMXHUwMDFmU5Ve2atof+CLUDpcdTAwMTjuJeOPeKzHdV5+qFZcdTAwMTO/gi9cdTAwMWPJvDmmufBcdTAwMTXZiK93gFx1MDAxOZ/nXt7meHVwLsrtfDVcdTAwMTNgqam6uiU3sftThzqSXHUwMDE2vHGElCBcdTAwMDRLslx1MDAwNHUx0+cgXHUwMDEx4WmA9U8/lFx1MDAxNbFlqYu0lDCmw45xJCyYXHUwMDEyQmO0LZD0z9Tb4Z+K9vOV9ppLXHUwMDFmQmO/SLI1ICaS+aYsYtBPXGIjukdcdTAwMTB4s3A9XHUwMDA0jCvdobn8Wlx1MDAxY7/Pvc2QJFx1MDAxNohTrbRcdTAwMDRIU7S8gFx1MDAxNHZ/jlNcdTAwMDFZQlx1MDAxMFx1MDAwNVx1MDAxNjXmJv9D0DXqXHUwMDFizFx1MDAwYnhnXHUwMDA09lOwMJhYxTZXlFCQXHUwMDA0/yD/tleL9och90WBLlxm7HWFdXxcdTAwMDfX10xIlGfKOICAd1x1MDAxMYxcdTAwMTjSxGBcdTAwMDF0rOCe42p2z1xmXHUwMDFir9wu6PDtw9hs3y5cckNhXHUwMDA1isRk1yHGXGLXq6OAaWq8voBY+CNcdTAwMTRZXHLetezBMNxtt12DoUzX7Vxml5/r7Fx1MDAwMVx1MDAwNo20qDv2ykuHu/Lug9nsLoWje+aki1x1MDAxM23+29lcdTAwMWN5s1x1MDAwZm+//9/va49cdTAwMGX4XHUwMDAzwvysQGF+vp1cZpst0XThL1x1MDAxODWiVHKud1x1MDAwZn6oXHUwMDFjvsw4xYtudnxcdTAwMTcrPjbbncHFpzlb31x1MDAxYk1cdTAwMTfUXHUwMDA
y4Vxiz1x1MDAxZcOMY0tUXHUwMDA2M9grXHUwMDEw11opXHUwMDAxXFxcdTAwMDb7UpmTiKZcdTAwMDO94TDQbUIxXHUwMDFhqVx1MDAxN4ah2C07x73M85RcdTAwMGLMw+5pXHUwMDA00z/g5/x903m/0Fx1MDAxMqhNQXrp6z1UkjDGvHmU29DauKSBhlx1MDAxM5Ky0MCPQ5ZcdTAwMWR0hq3AiaNVU21RoaQkXHUwMDE4m1x1MDAwZotg1cxcdTAwMDLrRlx1MDAwMifTUlF2RP/hIWL0XHUwMDA0yFx1MDAxNMicrSsyjonWXHUwMDFkTJhNqPpAtGNcdTAwMDdUXHUwMDFkNFx1MDAwMcCX4HPkXHUwMDA3KVO6RFx1MDAxM5hru2NqOs2LiEt7T7GaimUvy+lJOjv5LEy9O9woLEk1kFx1MDAwMq6Qt7KH+T7TytKMcVx0Jlx1MDAxN1x1MDAwNZ6vj6dcdTAwMDBcdFx1MDAxN5bGu0dcdTAwMWLhSM5cdTAwMTne5pU/XHUwMDFjfL5cZrtfv5boXHUwMDE3OOh3XHUwMDE5yHF99Vx1MDAxOPkvXHUwMDFillLgvUpcXKjidS1cdTAwMWO96ndI91wi6fTu8XnSO+FPXHUwMDEx3WDaMktxSlx0XHUwMDE4loJcdTAwMTKymNxDNIa9XG5AoU2Gtj/t/0RPvcJcdTAwMWF4KN6aupbE0mb44lZcdTAwMTdKsUugmIFqqVjzKJ9cdTAwMWb5c11y8zlcdTAwMTeHxqNErZhcZif004X3gC8tXHUwMDFhvqAvXWxQtcCxzFqMPcjmTadcdTAwMTiIXHUwMDA2h9XMOHeTTGh7pO1PK4rxXlUrhCUowFAxQ/SXyKZZOqxcdTAwMTioQYyoklx1MDAwMNkj2q9gRyO8R90ooL5gyFx1MDAxMv6tbVe0rXFoj1x1MDAwNsNu21x1MDAxZDiVX+pW3ziKT1iasWGlXHUwMDE1XCKIclx1MDAxM4zZXHUwMDE53/Hc+ajX719FrvNN/swj9/lg4NPKwr3bmaQswSnSglx1MDAxYuNULeJcdTAwMWKUrMVcdTAwMDFFXHUwMDAyISy4VHppYKflS8JcZlx1MDAwNJCh2evT+b7XXHUwMDA3XHUwMDFmwJ2D/auaXG4yc3Lz3Vx1MDAwMaOdWqjhpEYq1a3108WaXHUwMDFhTmufllx1MDAwZvZef1x1MDAwZWNcdTAwMTZcdTAwMTjhXHUwMDEyc7OgaalKXHUwMDE0aCdcdTAwMGIxkCpcdTAwMDRYuJLoeCuZXHUwMDBl4c7heFac9Thw+aCf5rSyR99lPCr/9UkmLVxcUlONZWesVIQ9ucj0nMegPK9cXLl1XHUwMDFkvWTBUzdcdTAwMWVcdTAwMTW2tOKgOzRbdtNoyY09Z7JcdTAwMTSZXHRkXHUwMDFjL1xyw9R1w1x1MDAxY1G9c/YkV2Doan5cYmB8XHUwMDE1m3DtKtnP97/sMIwjZ0py3+wp4EKMXCK8R9G2op2utSah89Zd+zxcdTAwMThmKCbP3Vx1MDAxM893ZkghXHUwMDBiXHUwMDAxb1x1MDAwMkNcblx1MDAxNFxiXUItmGiWNj5mU1rHpFD5ofZTXHUwMDE3Vlx1MDAxOU9cdTAwMTCRW5me/XBdXHUwMDBlJp/Veeqc3Fx1MDAwZuUk8eTUkl7vXG5NZVx1MDAxYn2Cp7qQkDlRXHUwMDFhZZu1VParUMEv6F3xpPSsLIxHQPil2oN90Uk4WpqOXHUwMDFl5UX4MqPT7eT1VbV92mCDO8SWUoRIeO9aM73Ivlx1MDAxNFx1MDAwMrBhrIHSXGIgRscjX3v7VjRcdTAwMTdcdTAwMWPsSp/U5FNHyrfT5L1piVx1MDAxNPlDXHUwMDE2XHUwMDBiTjFcdTAwMTi1ezhEN1x1MDAwN2NPNC1cdTAwMTFLacFcdTAwMDTEpvo28K6VXG7cwoK
NmDBlKlt7q1x1MDAxN1x1MDAxZT76yE2xb1x1MDAwMlxmTjIhtGeB/1x1MDAxYnQ5XHUwMDA2ZCsuKFx1MDAxNWBws1XlqbHCkqCtIf3qbU3eXHUwMDA1r57rxZtcdTAwMDFcdTAwMGI/3vbzyVj8Oyvx5abn6YBSXHUwMDAzXHUwMDA2XHUwMDE42InGIe5pKXH2M1x1MDAxYlDCKzOl7rDZa8pcdTAwMWSs3PtOKYqbM2E8Y0KWhlfPMDarSVx1MDAwNFx1MDAxMCC6MiQsLJM5x1x1MDAwNVx1MDAwNj3EudJsZUxfLEHRXHUwMDFmXHUwMDE5L3vhPcFeTZRiXG70q9h2PqHB6EVKaimZd3Wz+VlB2PxkO1x1MDAxOUf+slx1MDAxNuNN5lx1MDAxMYhaJtHueVx1MDAxZZtcdTAwMWQqpylrzaJKXHUwMDBirPuZZjGLtpaWcoBcdTAwMTVrMiFcdTAwMTVFTFDgLOyI4ScgPmb5jKRKMLDF5ibRvMolwqBvMVx1MDAxOK2cIElBsK5cYltlloLIrXXC3Pp1pH4x7NBmhd83XHUwMDAygXo5W1Dfwvblpl9cdTAwMDWbcbQyrVx1MDAwMXqSIeP9XpW1puNccsBcdTAwMWbYjnHoYMb4+4TtZje1V1x1MDAwMcCgQNpgXHUwMDEws1x1MDAxODRcdTAwMDGieDVcdTAwMWScmD5cdTAwMDJ0tp7BxDK4XHUwMDEyK4P6YtI24ItccvPDZlx1MDAxNIdcdTAwMDM2wVx1MDAxOFx1MDAxMnDA1tNR0EdYmCYnIG0l01x1MDAwYuJ2XHUwMDE1ZHvK2839ZTyVZFaaYHAhqamlu7PIXHJe0cc+zTnxXFw6XHUwMDFieujEY4/T4dNhRa49qPuJ3HdSUqRMXHUwMDBiXHUwMDE5rrnJI4S5vCRymaGKZrFcdTAwMDPlWlx1MDAxMKI+VH9x8+o5xsHQXHUwMDA2hFx1MDAwM5A0oJfM+e9cXORyoSxcdTAwMTD8pspcdTAwMGYy69091vZryircXHUwMDBmV0j7JN1cdTAwMWQ/JvjLw1x1MDAxMN5cdTAwMDYyy3OaUM5cdTAwMDSXe8zpafS+XHUwMDEzyOUj6UYte627MXtK1ODEvSxmSVx1MDAwNGhcYkFMeyiBl6pkXHUwMDA3qMaW6UNk1oUz05HwiFx0o4JboP5cdTAwMDXfNVx1MDAxMmFcdTAwMTLLgFH7LFx1MDAxYftbOlpeK51EOmO33+2YXHUwMDAxnoV+ei5cdTAwMTZcdTAwMGX+pJIrm1x1MDAwN3LUaIQk/j2ZuElx5nyPslxyj+WHZ0xSOPaQKjVv49dcdTAwMTFSbchcdTAwMTNnXHUwMDAwxpCyyEtccjcw4/BcXFx1MDAwMrxcdTAwMTSWwsowM2J2XHUwMDAyXHUwMDAzk/41XHUwMDBlPzVcdTAwMWVcdTAwMDHmXHUwMDBiMjGSLWa+nVx1MDAxMeXMZVx1MDAwMmh5XCJso9R9p1x1MDAxNcClv1W4QVdI1dSm1LayNapUnFJFVDVcdTAwMTYlXHUwMDBlW5S0XHUwMDFkXqlSWlx1MDAxNZ9cdTAwMTJukFx1MDAxYnJXlFBcdTAwMTTDXHUwMDAz2SPc8Ky7LqPjQSHabDixaULUk8lTR1x1MDAxM5NcdTAwMTSsN0aFNPkgSCxcdTAwMTXixab+PSOESmlaMlx0j6Y8uHWHhTHuyK5cdTAwMTFcdTAwMDeCzUJfRbaWNPosUL1dcU3ySqxcdTAwMWXu3+FGI1VNP+VZu1bKXrHOL0te+Zg6zvSdses8nd3epD5X/a698HGLLmyMblDT/2Kf6P/mWXCiXHUwMDEygptKSCawIVxyufOKzNfwXHUwMDA2skyFJOBTpl1cdTAwMGLzL5T08YxQYJomXHUwMDE2zVx1MDAxOTJcZoWvXHUwMDExXHUwMDEz2FJSmXg2o0rO/O6ruti
0tJLellefm+P2K31pm1XUmTdIgLl524RcdTAwMGKuqSlU4TnoxW2lLdN5wiy5M6nCdDVEsOBJW7yJr+XO8p135md1xs1P9yHjRFx1MDAxM1/Pk8JcXFx1MDAwMe7k7pZ+MpjPJtJXyWK0XHUwMDExl11ZXHUwMDFm39PBifcnJ0Igi4JNXHUwMDAyT1x1MDAxN/5IseR3XCLILFxuM6m11GSzXHUwMDEyfry4KiXaQlx1MDAxYUstdu1QzimSciGb82/P0uvDYW/w51x1MDAxZn9cdTAwMTi1XHUwMDFicOZcdTAwMTTZsns9q2X3LKDOn2sv7Deio/J2xf0zf5VcdTAwMDbxuleB9qSsodzVoClYNFN6jFdGN+OcOHk7glBg5kqbNlqCkSXejjFcdTAwMDWwc4KENl1+8PLIflx0bZfUdKGlW9NcYlx1MDAwM7nMdVx1MDAxNEUnj6PHok2b91x1MDAxMZy+el8z8m/avoNmVP7dXHUwMDBlNMfcJJbvjqXutJhcdTAwMWSG29fXpNnJhZ/PXHUwMDFkt0jxqWOJSbDJMdZMckEoXmbtUllKS6xcdDB3gfRcdTAwMTEz6fcl7ViaOjPc60f4tZB6u+K6XHUwMDA1WrlOe1x1MDAxMmjfXthVwvOVPO7WSodrivNN2o9F2lx09yftnGJu8md3N503z4JcdTAwMTNcdTAwMTVcdTAwMTBcdTAwMDJcdFx1MDAwYvRcdTAwMTbwXHUwMDE1xEy10UX5ILhcdTAwMDWkjTIuiVx1MDAwNM5cIo7n1DtcZmUnSlx1MDAxMI6Yj9D4e1P2zfrpzJ+yc+lJk1jH2Zlak2nyTdo/ZJpgpH1cdTAwMDN0ylx1MDAxNPuCd7O76Fx0tNN32baY3IqrZMRcdTAwMWU7j0O7kjio6DlcdTAwMDJrJyahXHUwMDE1I1x1MDAwMvOMXGK17C1EyII3Y/JhXHUwMDE15/A0jkjaNbOE2J2zXHUwMDAzXHUwMDAzXHUwMDAzYYnYPyiw/pMg26bGUaA0W2N3WtR9v5FcdTAwMWQ39L6pY1x1MDAwMtWm+izfnXaUMmWOxrRcdTAwMWNoJKuXwfxziU9cdTAwMWZcdTAwMGbbMeEooXfT3YSb3lx1MDAxMJxgqZfBrSysYLvUs5ryXHUwMDEynULPXHUwMDA0IEIwWLq1OWc8V1xyXFxFXHUwMDFlSIhQXHUwMDFlL1TSI5fF899cdTAwMWP+aKF36d+xUDBuXCJKe5RcdTAwMDE9TydcdTAwMDKo4FxckUIrnlx1MDAxOF1lq8Ewj506mphEllSKmlqgyvviZ99cdTAwMTeWWU1HwV7AYDKgI6ag7cvhhUQwJuHj2/58RL1dcVxyhVx1MDAwZqTGiUm7fkeGcXnR5kF6XHUwMDE3Tp9/U/j9lPEvoPBcdTAwMTj7ilx1MDAwNyU1R0qx3aXD5klwotKBa2kxXCJcdTAwMThcdTAwMTOU66VOXHUwMDA3RFsgN4RcdTAwMDJmXHUwMDBmLI4rX2f5ifB32MtcdTAwMDXFPulvf2/+vlk1nW1cYrlTXHUwMDBmZf3m75/F3zd0boXhXHUwMDEwIJR7dFnJ1p9cdTAwMWFXwcdGI1x1MDAxMEneR+8v0DVvsJMn8NpcIjDBTG5cdTAwMWRBaLG9YUBYkmiw8MG+XHUwMDE3mHhbjFx1MDAxZpy/K2GZTnQ7XHUwMDEzeFBcdTAwMWGmL8w/qUbPW4S7WzpJ+r7PuI7ZsVx1MDAxNCOyIY9cdTAwMGZmPGV6XHUwMDBm8s7jZZSoq8ZFK1NcbofLgVxibaCTb5CuTOlcdTAwMTBccvZcdTAwMDRiwmiXRVxcY65Mk1x1MDAxMFPhXHUwMDEyTFx1MDAwYi7k8WKG+2fTSM20Vv+o4lwi39k0XHUwMDFi0Iy1b96+WY4vNdqj3UHCxp3ufSRcdTAwMTm6xVx1MDAxN4VYvtkrdlx1MDAxYadcdTA
wMGVmQLElzHpijiRTdNF1IFx1MDAwMeiGN3BkymBcdHJEXHUwMDE1vaeLXSswKkx/rn9cdTAwMWWOv13sO+Ca+GfbK1OyxLTL2F1JMyRvM9fJzmXoSpeHUVxce6g0Tlx1MDAxYteUamVcdTAwMDHbUaZWXHUwMDEwx8vrUoHoW4xI08VUI47p8WrJ7m17m0rQ3Fx1MDAxNO785yH72/bepq09837Z9p51XG6g+1x1MDAxNPUrt1x1MDAxM0/2+VVm3Of5aaJyz4vd6/Fpw1x1MDAxYSg1tjQngoOqXHUwMDE03m7ML6a34Fx1MDAxNmxcdTAwMDWwSYK16fh7NFxcwzUsU6tzd9PbOFi4YD7t+P6WwIYnWHZMRM3586w1W/FtXHUwMDAwXHUwMDE0cCv/npm+3rnxXHUwMDE5eN5jOMeFsedcdTAwMTWsRuyUlHJcdTAwMGaXfO28oGoqkVx1MDAxMaFKtJ9sZmOqcHHiXYhcdTAwMDDFyFwiSpiCaFh7M9RfXHUwMDFjY8wyJb+QwGCdU+S/7vzjytlUK6OE7bFSllxuJJhPXHUwMDFh+z9cZsNv5i7oxdOBst+ojmtuM19fN6Gm2M8+a1KawWjx4nGUuU3i+sMwXHUwMDEzXHUwMDBmVSrTw65+P1x1MDAwNqJNaU4wXU3XJi5cdTAwMTdcdTAwMTFNsUWFKSjGmSSUY33EVDVGrL28YUIgfYhs2K9cdTAwMGbnVyv3pMC8fkxHXHKYYyH865RR0/5cdTAwMTOTPVx1MDAxMk9cdTAwMTG9Q1JEs+KiPqHTaC7VRuSwOe9H6FtEXHUwMDAxRNpoZ0Ayk3ixSWBcdTAwMDBjZSlcIik1nnAq9Fx1MDAxMYPmREmLSlx1MDAwNFxmXHUwMDFlgXBBal2ZMlx1MDAwYlx1MDAwNmJcdTAwMWFcdTAwMWEybFxc4Xy1TNmsXHUwMDA2L1xmmp9i0rswXd7fg/5cdTAwMWSD5unxU4ny23qwXHUwMDE3VNG80+P5XHUwMDAyvV1cdTAwMTc0XHUwMDBmIFNqVXJT8Z9jbFx1MDAxNlx0rFx1MDAxNlg05Ww5WEBSUkVMUdaVm9+p6OPF8DJcdTAwMTCIuuVcdTAwMWVB5eLktum6ZDjyieRjXCJcdTAwMTlcdTAwMTbY9DJcdTAwMDFSx1Yz8Vx0t8whplx1MDAwNiVcdTAwMTFaUL1aOO5cdTAwMWRcdTAwMDH7NUH5pbD9wvdcdTAwMGZcXPTRb9qbn9VcdD8/32/e//dcdTAwMTd9kmxIJVx1MDAxNEJcdI3o7nbM5ibrpyr6lImoIWQ8/oospuVSMHI0PHYjXHUwMDEyXHUwMDE1XCJYXHUwMDFjL5PwMIJPXHUwMDAzoFx1MDAxNdU+vVN/seCDu9qnq9q+gk/l8GXGKV50s+O7WPGx2e5cZi7WXHUwMDE2u1x1MDAwNVx1MDAxOYOwSVx1MDAxN0JcbjFcdTAwMDHGIFst41xyMoaCXHUwMDEw4sRcdTAwMTSFw6aB7vtcdTAwMDTf3cTNjXg6fFFC9b7Nr3Mju1x1MDAxNF8/KCRMfW0k5Ys4lmuq3YLpXHUwMDAz7FlcdTAwMTMuJUKYq9U38i34znZcdTAwMTN81N/mw0BdmPJcdTAwMDb/tjpWN7YjPFHBx7RlwpxUwEziYpHAXHUwMDE5g9BsXHUwMDE1iCt26uZcdTAwMWUxVjr8nORcdTAwMWHHfXtJ7iv1NrdJXTL3QJOZbouEKVNcdTAwMDFH8tUsSaytmbZDTCOthcArd7+T2LtW1duHR1QsuLlSof5QKHVcdTAwMWZqZT+xZ+rQSbhcdTAwMThcdTAwMDExS/TqmFxiMSWsXHTDZsUtUOdvqfcq9X57vcJcdTAwMGa718tcdTAwMWE2/fZGXHUwMDAwXHUwMDAwbuXVVTC/j1x1MDAxZiZcdTAwMTU9tFx1MDAxOaK/vT7X2eKXOVwidu91NZ9cdTA
wMTQ/2m7byXnXhv0xXHUwMDE41/77ud2aP/NcdTAwMTf5uX8jLYCZbXLq/3z59c+F0/+P8VxuXGL2eyaWJveTXHUwMDEwK1x1MDAxNZ5H5Sly7dhcciqfd8cpWqGVXHSnl1x1MDAxMz4ut8vjy0bw6TKsp5V22Y3HKr372E03k41P0uF4zY7me/ekjn5+rrRbrVxuSoydc+RehoNP8fM4mv11Q2278DzIZFx1MDAxM6NcdTAwMTLhLfg8rcRcdTAwMTJjm9zqeDuNy51Eq9S5OS+R5zFcXKVWpjeTXHUwMDEyXHUwMDE5tlJ38+t5vjMsRVuj+0K6XobrlNtcdTAwMWGX2tcwtjSC3/v3WTywXHUwMDBivGW3da/U8OyPplx1MDAwN8W79Fx1MDAxNMbQK1x1MDAxMj2KR1uNXHUwMDEyYcNK9MItRfOTl898mrq7qVx1MDAxN9vPrbKLx5W7m9m5lo9NXHUwMDE14Fx1MDAxZcPx/87EQvVKtPY2Xs/9NeH3tHlcdTAwMDY23H+lXHUwMDExR9nzkFh9XHUwMDBl6u1cdTAwMWP3cC+5XFxkXHUwMDEyP09cdTAwMGauwkGUauRcdTAwMDfwfV5cbiNcdTAwMTY/v1x1MDAxZaVcdTAwMGJPQ9g+TIdcdTAwMTkunVx1MDAwN2upXFxr8PPz+vNEJqXzXHUwMDFhXHUwMDFjXHUwMDA3383Gx5nG81Px7qZcdTAwMWKPXut4XHUwMDEzPV+6wefL9lx1MDAxM7yjppuaMlWOXiA7/DruXFy8dplLXGZSOe/3n8Zlet/J1P79b49fq+94ZbrpRCOpp5awXHUwMDExPjdcdTAwMGWIXHUwMDEzZ7x6lNc82X1cdTAwMTXZezC0/1x1MDAxMrVvXGZtxtCgUkj37qO3tVIs37BcdTAwMGI3rVShMiiRXHUwMDA0/D/Dysr2eIOpXCJpTYrkXHUwMDE58JRmMFx1MDAxZZiD8VqRNnW8XHUwMDExMWObzLaF187VSTFcdTAwMTdcdTAwMDFM3MK8NPPRPJdg7fX5wN/1+1I52LeCjdnc7sI7mc37XHUwMDE0hXc4ZVx1MDAxYua0pox5l1x1MDAxNa6f069HvSmfv3776/9cdTAwMDHCLc1UIn0= + + + + + App WorkloadAlice's Branch WorkloadBob's Branch WorkloadLapdev Sidecar ProxyApp Workload PodApp ContainerAlice's Branch Workload PodApp Customised ContainerBob's Branch Workload PodApp Customised ContainerLapdev Environment BaselinePreview URLhttps://base-environment.app.lap.devPreview URLhttps://alice-branch-environment.app.lap.devPreview URLhttps://bob-branch-environment.app.lap.devhttps://base-environment.app.lap.devhttps://alice-branch-environment.app.lap.devhttps://bob-branch-environment.app.lap.devtracestate: lapdev-env-id=base-idtracestate: lapdev-env-id=alice-brach-idtracestate: lapdev-env-id=bob-brach-id \ No newline at end of file diff --git a/docs/README.md b/docs/README.md new file mode 100644 index 0000000..d40a040 --- /dev/null +++ b/docs/README.md @@ -0,0 +1,68 @@ +# Introduction + +Kubernetes makes deploying 
apps easy, well, sort of. But a 100-microservice app makes the development flow hard. You can't fit the whole app on your workstation without pain. The most sensible way is probably running the development flow in Kubernetes itself. Have a pipeline to build and deploy your code changes. But the feedback is slow. + +Tools like Skaffold can sync your code changes to pods directly, and tools like Telepresence can intercept traffic to your local machine, which enables you to do local debugging. That makes things feel like good old local development again. But, + +**How do you manage all the Kubernetes resources?** The deployments, ConfigMaps, services, secrets - who maintains them? Do developers share the same resources, or does everyone get separate environments? If they're separate, how do you keep them in sync? How do you ensure changes to the app propagate to all dev environments? And what about domains, DNS, and HTTPS for developers to actually access their work? + +## Why Lapdev + +Lapdev Kubernetes Environment solves all these pains so that you don't have to. + +### **Seamless Environment Management:** + +Lapdev reads your **production Kubernetes manifests directly from your cluster**. Just tell Lapdev which workloads your app needs, and it automatically replicates: + +* The workloads (Deployments, StatefulSets, DaemonSets, etc.) +* Associated ConfigMaps and Secrets +* Services and networking configuration + +**Your production manifests become the single source of truth** - no duplicate YAML files to maintain, no config drift between prod and dev. 
+ +### Stay in Sync with Production + +* Lapdev continuously monitors your production manifests for changes +* App Catalogs automatically update when their source workloads change +* Environments **notify you when updates are available** - sync with one click when you're ready +* No surprise interruptions during development - you control when to pull in changes + +### Flexible Environment Models + +**Personal Environments:** Each developer gets a complete, independent copy of all workloads. Perfect for testing breaking changes or complex multi-service interactions with full isolation. + +**Branch Environments (Cost-Effective):** A shared baseline environment runs all services once. Developers create lightweight "branch environments" for only the services they're actively modifying. Lapdev automatically routes your traffic to your version while everything else uses the shared baseline. + +### Local Development with Devbox + +Lapdev includes **Devbox**, a CLI tool that integrates seamlessly with your environments: + +* Intercept cluster traffic to your local machine for real-time debugging +* Use your local IDE, set breakpoints, and see live logs +* Transparently access in-cluster services (databases, caches, internal APIs) as if you're running inside the pod +* No complex VPN or tunneling setup required + +### Preview URLs with Zero Configuration + +Create Preview URLs for any service in your environment: + +* Unique HTTPS URLs automatically generated for each service +* Automatic TLS certificates and DNS configuration +* Traffic proxied directly to your in-cluster services +* Optional access control - configure URLs to be accessible only to Lapdev logged-in users +* No firewall changes, no manual Ingress configuration, no cert-manager setup + +Share your work with teammates, PMs, or QA instantly - they can test your changes without any cluster access or VPN. 
+ +### Easy Installation in Your Cluster + +Lapdev requires just one deployment `lapdev-kube-manager` installed in your cluster. That's it. + +## Getting Started + +1. [Connect Your Kubernetes Cluster](how-to-guides/connect-your-kubernetes-cluster.md) +2. [Create an App Catalog](how-to-guides/create-an-app-catalog.md) +3. [Create Your First Environment](how-to-guides/create-lapdev-environment.md) +4. [Start Local Development with Devbox](how-to-guides/local-development-with-devbox.md) + +Learn more about [Core Concepts](core-concepts/architecture/README.md). diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md new file mode 100644 index 0000000..da4c2f8 --- /dev/null +++ b/docs/SUMMARY.md @@ -0,0 +1,22 @@ +# Table of contents + +* [Introduction](README.md) + +## How to guides + +* [Connect Your Kubernetes Cluster](how-to-guides/connect-your-kubernetes-cluster.md) +* [Create an App Catalog](how-to-guides/create-an-app-catalog.md) +* [Create Lapdev Environment](how-to-guides/create-lapdev-environment.md) +* [Local Development With Devbox](how-to-guides/local-development-with-devbox.md) +* [Use Preview URLs](how-to-guides/use-preview-urls.md) + +## Core Concepts + +* [Architecture](core-concepts/architecture/README.md) + * [Traffic Routing Architecture](core-concepts/architecture/traffic-routing-architecture.md) + * [Branch Environment Architecture](core-concepts/architecture/branch-environment-architecture.md) +* [Cluster](core-concepts/cluster.md) +* [App Catalog](core-concepts/app-catalog.md) +* [Environment](core-concepts/environment.md) +* [Devbox](core-concepts/devbox.md) +* [Preview URL](core-concepts/preview-url.md) diff --git a/docs/core-concepts/app-catalog.md b/docs/core-concepts/app-catalog.md new file mode 100644 index 0000000..1866b0a --- /dev/null +++ b/docs/core-concepts/app-catalog.md @@ -0,0 +1,79 @@ +# App Catalog + +The **App Catalog** is the foundation of how Lapdev understands and manages your application in Kubernetes. 
+ +It defines **which workloads belong to your app** — such as deployments, statefulsets, and their associated configuration — and acts as the **blueprint** Lapdev uses to create consistent and production-aligned development environments. + +### Why App Catalogs + +In most Kubernetes setups, application configuration is scattered across multiple manifests or Helm charts.\ +Developers often need to manually manage which workloads belong to which app, and this can easily lead to configuration drift between environments. + +The App Catalog solves this by: + +* **Defining your app once** — directly from your production workloads. +* **Reusing that definition** to create any number of environments (personal, shared, or branch). +* **Keeping everything in sync** with your production manifests — no duplicate YAML or manual updates. + +### How It Works + +1. Lapdev connects to your **source cluster** and reads all workloads running in it. +2. You select the workloads that make up your application (for example, `frontend`, `api`, and `database`). +3. Lapdev groups those workloads — along with their related **ConfigMaps**, **Secrets**, and **Services** — into an **App Catalog**. +4. The catalog is stored in Lapdev and can be reused to create environments on any connected cluster. + +> 💡 The **source cluster** is typically your staging or production cluster.\ +> You can also use a dedicated cluster that mirrors production manifests.\ +> Learn more about cluster roles in [**Cluster**](cluster.md). + +### Relationship Between App Catalogs and Environments + +| Concept | Purpose | Example | +| --------------- | ----------------------------------------------------------------------------- | ------------------------------------------------------------------------ | +| **App Catalog** | Defines what your app is — a collection of workloads and their configuration. 
| `checkout-service` (includes `checkout-api`, `payment-api`, `frontend`) | +| **Environment** | A running instance of that app in a specific cluster. | `mia-checkout-feature` (personal env) or `staging-checkout` (shared env) | + +You can think of an **App Catalog** as a _template_ and an **Environment** as a _live deployment_ of that template. + +### Editing an App Catalog + +After creating an App Catalog, you can modify it anytime: + +* Add or remove workloads +* Update its description +* Sync it with your source cluster to reflect production changes + +Lapdev automatically tracks these updates and keeps associated environments consistent with the latest configuration. + +### Typical Usage Pattern + +App Catalogs fit into your workflow like this: + +``` +Source Cluster (Production/Staging) + ↓ + App Catalog (Blueprint) + ↓ +Multiple Environments (Personal/Shared/Branch) +``` + +Once created, catalogs can be synced when production manifests change, keeping all environments up to date. + +> For step-by-step instructions, see [**Create an App Catalog**](../how-to-guides/create-an-app-catalog.md). + +### Benefits of App Catalogs + +✅ **No config drift:** Use production manifests as the single source of truth.\ +✅ **Reusable blueprint:** Create multiple environments from the same catalog.\ +✅ **Simplified management:** Centralize workloads and configuration in one place.\ +✅ **Scalable:** Works across clusters and environment models. + +### When to Use Multiple App Catalogs + +You can create more than one App Catalog if: + +* Your organization manages multiple independent apps (e.g. `checkout`, `search`, `analytics`). +* You want to separate internal tools from core services. +* Different teams own different parts of your system. + +Each App Catalog can have its own lifecycle and permissions. 
diff --git a/docs/core-concepts/app-catalog.mdx b/docs/core-concepts/app-catalog.mdx new file mode 100644 index 0000000..b961bdd --- /dev/null +++ b/docs/core-concepts/app-catalog.mdx @@ -0,0 +1 @@ +export { default } from "./app-catalog.md"; diff --git a/docs/core-concepts/architecture/README.md b/docs/core-concepts/architecture/README.md new file mode 100644 index 0000000..af86a7e --- /dev/null +++ b/docs/core-concepts/architecture/README.md @@ -0,0 +1,90 @@ +# Architecture + +This document explains how Lapdev works and how its components interact to provide seamless Kubernetes development environments. + +### Overview + +Lapdev consists of three main components: + +1. **Lapdev API Server** (SaaS) - Manages users, authentication, and orchestrates environment creation +2. **Lapdev-Kube-Manager** (In your cluster) - Reads production manifests, manages dev environments, and pushes routing/intercept state to sidecars +3. [**Devbox**](../devbox.md) **CLI** (Developer's machine) - Enables local debugging with cluster connectivity + - Plus an auto-injected **Sidecar Proxy** per pod that enforces routing and intercepts at runtime + +### Architecture Diagram + + + +### Component Details + +#### Lapdev API Server (SaaS) + +The Lapdev cloud service handles: + +* **User authentication and authorization** - GitHub/GitLab OAuth, team management +* **Environment orchestration** - Receives environment creation requests from users +* **Secure tunnel management** - Establishes websocket tunnels between your cluster and Lapdev +* [**Preview URL**](../preview-url.md) **routing** - Routes traffic from automatically generated HTTPS URLs to your cluster + +**Security:** + +* Communicates with your cluster via secure websocket tunnels (TLS encrypted) +* No direct access to your cluster's API server +* Receives workload manifests (Deployments, StatefulSets, ConfigMaps, Secrets) from lapdev-kube-manager to build App Catalogs +* You control which workloads Lapdev can access through App 
Catalog selection +* No access to runtime application data (databases, logs, persistent volumes) + +#### Lapdev-Kube-Manager (In Your Cluster) + +Deployed as a single Kubernetes deployment in your cluster, `lapdev-kube-manager`: + +* **Reads production manifests** - Discovers Deployments, StatefulSets, ConfigMaps, Secrets, and Services from your production namespace to build [App Catalogs](../app-catalog.md) +* **Creates dev** [**environments**](../environment.md) - Replicates selected workloads into isolated or shared namespaces +* **Manages sync** - Monitors production manifests for changes and updates dev environments +* **Publishes traffic routing** - For [branch environments](branch-environment-architecture.md), computes routing tables and distributes them to sidecars (sidecars enforce routing; see [Traffic Routing Architecture](traffic-routing-architecture.md)) +* **Establishes secure tunnel** - Maintains websocket connection to Lapdev API Server for orchestration + +**Permissions:** + +* Read access to production namespace (to read manifests) +* Full access to Lapdev-managed namespaces (to create/update environments) +* No access to other cluster resources + +#### Devbox CLI (Developer Machine) + +The `lapdev devbox` command-line tool enables local development: + +* **Traffic interception** - Routes requests for specific services to `localhost` (controlled via Lapdev dashboard) +* **Cluster connectivity** - Provides transparent access to in-cluster services (databases, APIs, caches) +* **Secure tunnel** - Establishes encrypted connection to Lapdev API Server, which proxies to your cluster + +**How it works:** + +1. Developer runs `lapdev devbox connect` and sets their active environment in the dashboard +2. Devbox establishes secure tunnel: `Developer → Lapdev API` + - Lapdev-Kube-Manager stays in the control plane, pushing intercept metadata and (optional) direct-connect hints to sidecars, but is not on the data path. +3. 
Sidecar receives intercept config, opens its own tunnel to Lapdev API for that workload, and forwards intercepted pod traffic over the tunnel to the Devbox CLI +4. Developer's code processes requests on localhost; responses flow back through the same tunnel to the pod +5. Non-intercepted traffic continues to the in-cluster service transparently + +Learn more: [Devbox Concept](../devbox.md) | [Local Development with Devbox](../../how-to-guides/local-development-with-devbox.md) + +### Learn More + +**Specialized Architecture Documentation:** + +* [Traffic Routing Architecture](traffic-routing-architecture.md) - How Lapdev routes traffic between components +* [Branch Environment Architecture](branch-environment-architecture.md) - Cost-effective environment model details + +**Core Concepts:** + +* [Cluster](../cluster.md) - How clusters connect to Lapdev +* [App Catalog](../app-catalog.md) - Blueprint for your application +* [Environment](../environment.md) - Running instances of your app +* [Preview URL](../preview-url.md) - HTTPS access to your services + +**Getting Started:** + +* [Connect Your Kubernetes Cluster](../../how-to-guides/connect-your-kubernetes-cluster.md) +* [Create an App Catalog](../../how-to-guides/create-an-app-catalog.md) +* [Create Lapdev Environment](../../how-to-guides/create-lapdev-environment.md) diff --git a/docs/core-concepts/architecture/branch-environment-architecture.md b/docs/core-concepts/architecture/branch-environment-architecture.md new file mode 100644 index 0000000..7f818ff --- /dev/null +++ b/docs/core-concepts/architecture/branch-environment-architecture.md @@ -0,0 +1,184 @@ +# Branch Environment Architecture + +Branch environments are a cost-effective way to run development environments in Kubernetes. Instead of duplicating all services for each developer, branch environments build on a [shared environment](../environment.md) and only run the services you're actively modifying. 
+ +### Architecture Diagram + + + +### Concept + +Without Branch Environments: + +``` +10 developers × 100 services each = 1000 pods total +Every developer runs a complete copy of all services +``` + +With Branch Environments: + +``` +Shared environment: 100 services (shared by everyone) ++ 10 developers × 2 modified services each = 20 branched services +Total: 120 pods +``` + +**Example:** Alice modifies `api`, Bob modifies `worker` - they only run their modified services while sharing the rest. + +**Savings: 88% reduction in infrastructure costs** + +### Prerequisites + +Branch environments require: + +* **HTTP/HTTPS communication only** - Non-HTTP components (databases, message queues, gRPC) remain shared across all branches +* **Header propagation** - Your application must forward OpenTelemetry `tracestate` headers in service-to-service calls + +**Important:** While Lapdev automatically injects the tracestate header for incoming requests, your application code must forward headers in service-to-service HTTP calls for routing to work correctly. 
+ +### How It Works + +#### The Shared Environment + +One shared environment runs all your services: + +* Contains a complete copy of all workloads +* Shared by all developers +* Acts as the foundation for all branch environments + +#### Your Branch Environment + +When you create a branch, you specify which service(s) you're modifying: + +* Only those services run as branched workloads alongside the shared copies in the same namespace +* Everything else routes to the shared environment + +#### Traffic Routing Overview + +When someone accesses your branch [preview URL](../preview-url.md), Lapdev routes traffic intelligently: + +* Requests to services you've modified → route to your branch version +* Requests to all other services → route to the shared environment + +**Example:** If Alice's branch modifies only the `api` service, then requests for `api` go to her branch while `web`, `worker`, and all other services use the shared environment. + +#### Routing Mechanism + +**1. Header Injection** + +When a request enters through a [preview URL](../preview-url.md), Lapdev automatically injects an OpenTelemetry `tracestate` header that identifies which branch the request belongs to. + +**2. Header Propagation (Your Responsibility)** + +Your application must forward the `tracestate` header in all HTTP service-to-service calls. + +**How to implement:** + +Most HTTP clients will forward headers when you pass them along explicitly. 
For example: + +```javascript +// Forward all incoming headers to downstream services +const response = await axios.get('http://other-service:8080/api', { + headers: request.headers // This includes the tracestate header +}); +``` + +**Framework-specific approaches:** + +✅ **Automatic with these frameworks:** +- Spring Cloud Sleuth (auto-propagates OpenTelemetry headers) +- OpenTelemetry-instrumented applications (SDK handles propagation) + +⚠️ **Manual forwarding required:** +- Express.js, Fastify, Koa (pass `request.headers` to HTTP client) +- Go HTTP clients (copy headers from incoming request) +- Any custom HTTP client implementation + +**3. Routing Decision** + +The Lapdev Sidecar Proxy (automatically injected by Lapdev into each pod in managed environments) sits in front of your workload traffic: + +1. Intercepts incoming requests headed to your service +2. Reads the `tracestate` header to identify the branch +3. Checks if the target service has a branch override for this branch +4. If a Devbox intercept is active for this branch/service, routes to the developer’s local process; otherwise routes to the branched service if present +5. Falls back to the shared environment if no branch-specific target exists +6. The header continues downstream, so the next hop can repeat the decision + +> **Note:** The sidecar proxy is automatically added when Lapdev creates your environment. No manual configuration needed. For more details, see [Traffic Routing Architecture](traffic-routing-architecture.md). + +### Key Benefits + +* **Cost-effective:** Only run what you're changing (up to 88% infrastructure reduction) +* **Instant creation:** Branch environments are created instantly - branched workloads are only deployed when you modify them +* **Realistic testing:** Test your changes against real production-like services, not mocks +* **No conflicts:** Multiple developers can work on different services simultaneously + +### Limitations + +#### 1. 
Header Propagation Required + +Your application must forward the `tracestate` header in service-to-service HTTP calls, otherwise routing will break mid-request chain. See the **Routing Mechanism** section above for implementation guidance. + +#### 2. HTTP/HTTPS Traffic Only + +Branch routing works exclusively for HTTP/HTTPS communication. Other protocols are always shared: + +* **Shared across all branches:** Databases, message queues, gRPC services, TCP connections +* **Implication:** Be careful with database schema changes or queue message format changes + +#### 3. Stateful Services Considerations + +Services with persistent state (databases, caches) are shared across branches: + +* Database writes from one branch affect all other branches +* Redis/Memcached entries are visible to all branches +* File uploads or background jobs may interfere between branches + +**Best Practice:** Use branch-specific prefixes for cache keys and database records when testing data modifications. + +### Troubleshooting + +#### Traffic not routing correctly to my branch + +**Symptoms:** +- Changes aren't visible when accessing your branch preview URL +- Some requests use your branch version, others use the shared version (inconsistent routing) + +**Possible causes:** + +1. **Headers not propagated:** Your services aren't forwarding the `tracestate` header in HTTP calls +2. **Service not branched:** The service is not actually deployed in your branch environment +3. 
**Wrong preview URL:** You're accessing the shared environment URL instead of your branch URL + +**Debug steps:** + +* Verify your HTTP clients forward incoming headers to downstream services (see example above) +* Check the Lapdev dashboard to confirm which services are branched in your environment +* Check sidecar proxy logs to see routing decisions +* Look for the `tracestate` header in your service logs - if it's missing after the first hop, headers aren't being propagated + +#### Database changes from my branch affect other developers + +This is **expected behavior**. Branch environments share databases and other stateful services. + +**Solutions:** + +* Use branch-specific database prefixes or namespaces +* Test with read-only database operations +* Use separate test databases per branch (requires manual configuration) + +### Learn More + +**Related Architecture Documentation:** +* [Traffic Routing Architecture](traffic-routing-architecture.md) - Detailed explanation of how routing works across all environment types +* [Architecture Overview](README.md) - Overall system design and component interactions + +**Core Concepts:** +* [Environment](../environment.md) - Understanding Personal, Shared, and Branch environments +* [Preview URL](../preview-url.md) - HTTPS access to your services +* [App Catalog](../app-catalog.md) - Blueprint for your application + +**How-To Guides:** +* [Create Lapdev Environment](../../how-to-guides/create-lapdev-environment.md) - How to create branch environments +* [Use Preview URLs](../../how-to-guides/use-preview-urls.md) - Access and share your branch environment diff --git a/docs/core-concepts/architecture/branch-environment-architecture.mdx b/docs/core-concepts/architecture/branch-environment-architecture.mdx new file mode 100644 index 0000000..5515baa --- /dev/null +++ b/docs/core-concepts/architecture/branch-environment-architecture.mdx @@ -0,0 +1 @@ +export { default } from "./branch-environment-architecture.md"; diff --git 
a/docs/core-concepts/architecture/index.mdx b/docs/core-concepts/architecture/index.mdx new file mode 100644 index 0000000..72567f1 --- /dev/null +++ b/docs/core-concepts/architecture/index.mdx @@ -0,0 +1 @@ +export { default } from "./README.md"; diff --git a/docs/core-concepts/architecture/traffic-routing-architecture.md b/docs/core-concepts/architecture/traffic-routing-architecture.md new file mode 100644 index 0000000..d77e133 --- /dev/null +++ b/docs/core-concepts/architecture/traffic-routing-architecture.md @@ -0,0 +1,133 @@ +# Traffic Routing Architecture + +This document explains how traffic flows through Lapdev for both preview URLs and local development with Devbox. + +### Overview + +Lapdev handles two main traffic patterns: + +1. **[Preview URL](../preview-url.md) Traffic** - External users accessing your development environment via browser +2. **[Devbox](../devbox.md) Traffic** - Developers debugging locally while accessing cluster services + +Both patterns use secure tunnels through the Lapdev cloud service, eliminating the need for VPNs or firewall changes. 
+ +### Architecture Diagram + + + +### Components + +#### In Your Cluster + +**Lapdev-Kube-Manager** + +* Orchestrates environment creation and management +* Maintains control-plane connection to Lapdev cloud service (route updates, heartbeats) +* Pushes branch/service routing tables and Devbox intercept metadata to sidecars + +**Lapdev [Environment](../environment.md) (Namespace)** + +* Contains your replicated workloads +* Each environment is isolated in its own namespace +* Multiple environments can coexist in the same cluster + +**App Workload Pod** + +* Your application container(s) +* Runs unmodified in Personal and Shared environments +* [Branch environments](branch-environment-architecture.md) may require header propagation (see Branch Environment Routing below) + +**Lapdev Sidecar Proxy** + +* Automatically injected into each pod in Lapdev environments +* Routes traffic for branch environments based on tracestate headers +* Handles Devbox intercepts directly (opens tunnels to Lapdev cloud and shuttles pod traffic over them) +* Falls back to in-cluster service when no intercept is active + +#### External Components + +**Lapdev Cloud Service** + +* Routes preview URL traffic to your cluster +* Manages secure websocket tunnels +* Handles authentication for preview URLs + +**Devbox (Developer Machine)** + +* CLI tool running on developer's laptop +* Establishes secure tunnel to cluster +* Intercepts traffic for specific services +* Provides transparent access to in-cluster services + +### Traffic Flows + +#### Preview URL Traffic + +When a user accesses a Preview URL: + +1. **Browser** → Request to automatically generated HTTPS URL +2. **Lapdev Cloud Service** → Authenticates request (if access control enabled) +3. **Lapdev Cloud Service** → Routes through WebSocket tunnel to kube-manager +4. **Kube-Manager** → Forwards to appropriate environment namespace +5. 
**Sidecar Proxy** → Routes to target service based on environment type: + - **Personal/Shared:** Routes directly to service + - **Branch:** Checks tracestate header and routes to branched or shared version +6. **Service** → Processes request and returns response + +The response flows back through the same path to the user's browser. + +#### Devbox Intercept Traffic + +When a developer intercepts a service with Devbox: + +1. **Developer runs** `lapdev devbox connect` and enables intercept in dashboard +2. **Devbox CLI** → Establishes secure tunnel: `Local machine → Lapdev Cloud → Sidecar Proxy` + - Kube-Manager stays on the control plane (publishing intercept metadata and optional direct-connect hints) but is **not** on the data path. +3. **Sidecar Proxy** for the intercepted pod: + - Receives routing rules from Kube-Manager + - Opens the tunnel to Lapdev Cloud using the intercept token + - Forwards intercepted traffic to the developer’s local machine +4. **Local service** → Developer's code running on localhost processes the request +5. **Response** flows back through the same tunnel to the pod + +When no intercept is active, traffic routes normally to the in-cluster service. + +#### Branch Environment Routing + +Branch environments use intelligent routing based on tracestate headers: + +1. **Request enters** through Preview URL with branch-specific tracestate header (auto-injected by Lapdev) +2. **Sidecar Proxy** reads tracestate header to identify the branch +3. **Routing decision:** + - Service modified in branch? → Route to branch version + - Service not modified? → Route to shared environment version +4. **Header propagation:** Application must forward headers to downstream services +5. **Next hop:** Process repeats at each service + +This enables multiple developers to test different modifications simultaneously without conflicts. + +> **Important:** Branch environment routing requires your application to propagate the tracestate header in HTTP calls. 
See [Branch Environment Architecture](branch-environment-architecture.md) for implementation details. + +### Component Roles Summary + +| Component | Purpose | When Used | +|-----------|---------|-----------| +| **Sidecar Proxy** | Routes traffic based on environment type and headers | All environments | +| **Kube-Manager** | Orchestrates environments and pushes routing/intercept state to sidecars | Always running | +| **Lapdev Cloud** | Routes external traffic and manages authentication | Preview URLs and Devbox | + +### Learn More + +**Specialized Routing Documentation:** +* [Branch Environment Architecture](branch-environment-architecture.md) - Tracestate header propagation, routing mechanism, and troubleshooting +* [Architecture Overview](README.md) - Overall system design and component interactions + +**Core Concepts:** +* [Environment](../environment.md) - Personal, Shared, and Branch environment types +* [Devbox](../devbox.md) - Local development with cluster connectivity +* [Preview URL](../preview-url.md) - HTTPS access to your services + +**How-To Guides:** +* [Use Preview URLs](../../how-to-guides/use-preview-urls.md) - Create and manage preview URLs +* [Local Development with Devbox](../../how-to-guides/local-development-with-devbox.md) - Set up traffic interception and cluster access +* [Create Lapdev Environment](../../how-to-guides/create-lapdev-environment.md) - Set up different environment types diff --git a/docs/core-concepts/architecture/traffic-routing-architecture.mdx b/docs/core-concepts/architecture/traffic-routing-architecture.mdx new file mode 100644 index 0000000..cb8b9a0 --- /dev/null +++ b/docs/core-concepts/architecture/traffic-routing-architecture.mdx @@ -0,0 +1 @@ +export { default } from "./traffic-routing-architecture.md"; diff --git a/docs/core-concepts/cluster.md b/docs/core-concepts/cluster.md new file mode 100644 index 0000000..9a0c80f --- /dev/null +++ b/docs/core-concepts/cluster.md @@ -0,0 +1,166 @@ +# Cluster + +A 
**Cluster** in Lapdev refers to a Kubernetes cluster you've connected to Lapdev for managing development environments. + +Clusters can serve two roles: +- **Source Cluster** - Provides production manifests for creating App Catalogs +- **Target Cluster** - Hosts development environments + +The same cluster can serve both roles, which is the most common setup. + +### Why Connect Clusters + +Connecting your Kubernetes cluster to Lapdev enables: + +* **App Catalog Creation** - Read production manifests to define your app +* **Environment Deployment** - Run development environments in the cluster +* **Production Parity** - Ensure dev environments match production exactly +* **Centralized Management** - Manage multiple clusters from one dashboard + +### How It Works + +#### Lapdev Kube Manager + +When you connect a cluster, Lapdev deploys a lightweight controller called **lapdev-kube-manager** inside your cluster. + +The kube-manager: + +* **Establishes secure connection** - Opens a WebSocket tunnel to Lapdev API Server (TLS encrypted) +* **Reads production manifests** - Discovers Deployments, StatefulSets, ConfigMaps, Secrets, and Services +* **Creates dev environments** - Replicates selected workloads into isolated or shared namespaces +* **Handles traffic routing** - For branch environments, routes traffic to the correct version of services +* **Manages synchronization** - Keeps environments updated with production changes + +**Security Model:** + +* Connection is **outbound-only** from your cluster (no inbound firewall rules needed) +* Lapdev API Server **never** accesses your cluster's API server directly +* Kube-manager has **read access** to production namespaces +* Kube-manager has **full access** to Lapdev-managed namespaces only +* Cannot access other cluster resources or namespaces + +#### Token-Based Authentication + +Each cluster is authenticated using a unique token: + +* Generated when you create the cluster in Lapdev dashboard +* Used by kube-manager to 
establish the secure tunnel +* Stored as a Kubernetes Secret in your cluster + +### Cluster Roles + +#### Source Cluster + +A **source cluster** is where Lapdev reads production manifests to create App Catalogs. + +* Typically your **staging** or **production** cluster +* Contains the workloads you want to replicate in dev environments +* Kube-manager reads manifests but doesn't modify them +* You can designate any cluster as a source + +**Common setups:** +* Use production cluster as source (safest, always up-to-date) +* Use staging cluster as source (if staging mirrors production) +* Use dedicated "template" cluster (for controlled rollout) + +#### Target Cluster + +A **target cluster** is where Lapdev deploys development environments. + +* Can be the same as your source cluster or different +* Hosts all Personal, Shared, and Branch environments +* Kube-manager creates and manages namespaces here +* Choose based on resource availability and cost + +**Common setups:** +* Same cluster as source (simplest, most common) +* Dedicated dev cluster (isolates dev from production) +* Multiple dev clusters (for different teams or regions) + +### Cluster Permissions + +Cluster permissions control **which types of environments** can be deployed to a cluster. + +#### Personal Environments Permission + +When enabled, developers can create **Personal Environments** in this cluster. + +* Each developer gets fully isolated namespaces +* Higher resource usage (every dev runs complete app) +* Best for: staging/dev clusters with sufficient resources + +**Enable when:** +* Developers need full isolation for testing +* Cluster has resources for multiple complete environments +* You want individual developers to experiment freely + +#### Shared Environments Permission + +When enabled, admins can create **Shared Environments** and developers can create **Branch Environments** in this cluster. 
+ +* Shared environments are team-wide baselines +* Branch environments are lightweight personal modifications +* Lower resource usage (shared baseline + modifications only) +* Best for: cost-efficient development at scale + +**Enable when:** +* Team uses branch environment workflow +* You want cost-efficient development environments +* Multiple developers work on the same application + +#### Permission Use Cases + +| Cluster Type | Personal | Shared | Use Case | +|-------------|----------|---------|-----------| +| Dev Cluster | ✅ | ✅ | Full flexibility for developers | +| Staging Cluster | ❌ | ✅ | Team-wide testing baseline only | +| Testing Cluster | ✅ | ❌ | Isolated testing environments | +| Production | ❌ | ❌ | Source only, no dev environments | + +> You can change these permissions at any time in the cluster settings. + +### Relationship to Other Components + +Clusters are foundational to Lapdev's workflow: + +``` +Source Cluster + ↓ (provides manifests) +App Catalog + ↓ (blueprint) +Target Cluster + ↓ (deploys to) +Environments (Personal/Shared/Branch) +``` + +| Component | Role with Clusters | +|-----------|-------------------| +| **Cluster** | Provides infrastructure and manifests | +| **App Catalog** | Created by reading manifests from source cluster | +| **Environment** | Deployed into target cluster using App Catalog | +| **Kube Manager** | Runs inside cluster, bridges to Lapdev API | + +### Multiple Clusters + +You can connect multiple clusters to Lapdev for different purposes: + +**Common multi-cluster setups:** + +* **Source + Multiple Targets:** + - Production cluster (source only) + - Dev cluster A (target for team A) + - Dev cluster B (target for team B) + +* **Regional Separation:** + - US cluster (source + target) + - EU cluster (target only, for EU developers) + +* **Environment Segregation:** + - Staging cluster (source + shared environments) + - Dev cluster (personal environments only) + +Each cluster appears in your Lapdev dashboard with its 
own status, permissions, and environments. + +### Next Steps + +Ready to connect your cluster? See [**Connect Your Kubernetes Cluster**](../how-to-guides/connect-your-kubernetes-cluster.md) for step-by-step instructions. diff --git a/docs/core-concepts/cluster.mdx b/docs/core-concepts/cluster.mdx new file mode 100644 index 0000000..b3d24ff --- /dev/null +++ b/docs/core-concepts/cluster.mdx @@ -0,0 +1 @@ +export { default } from "./cluster.md"; diff --git a/docs/core-concepts/devbox.md b/docs/core-concepts/devbox.md new file mode 100644 index 0000000..c8dd0b7 --- /dev/null +++ b/docs/core-concepts/devbox.md @@ -0,0 +1,54 @@ +# Devbox + +**Devbox** is Lapdev’s local development companion — a CLI tool that connects your local machine directly to your Lapdev Kubernetes environment. + +It bridges the gap between _local iteration speed_ and _production realism_. + +### Why Devbox + +Developing in Kubernetes usually means slow feedback loops: rebuild, redeploy, and wait for pods to restart just to test a single change.\ +Devbox changes that by letting you **run your code locally while still connected to your real cluster environment**. + +This means: + +* You can test your app against live in-cluster services (databases, APIs, caches). +* Cluster traffic can be routed to your local process for real-time debugging. +* You no longer need complex port-forwarding, VPNs, or separate mock setups. + +### How It Works + +When you start Devbox inside a Lapdev environment: + +1. It authenticates with Lapdev and connects to your active environment’s namespace. +2. It synchronizes local and cluster networking rules. +3. It can optionally intercept service traffic and forward it to your local process. +4. It provides seamless access to other workloads and in-cluster dependencies. + +> 💡 Devbox doesn't replace Kubernetes — it _extends_ it for developers.\ +> You keep your production topology and cluster configuration, but develop with local speed. 
+ +### Core Capabilities + +* **Intercept Service Traffic:** Redirect in-cluster service requests to your local code. +* **In-Cluster Connectivity:** Access internal APIs and databases as if you were inside the pod. +* **Seamless IDE Debugging:** Run locally, attach debuggers, and see live logs. +* **Compatible with All Environment Types:** Works with personal, shared, and branch environments. + +### How It Fits in the Lapdev Model + +| Concept | Role | +| --------------- | ---------------------------------------------------------------------- | +| **App Catalog** | Defines what your app consists of (the workloads). | +| **Environment** | A running instance of that app in Kubernetes. | +| **Devbox** | Bridges your local machine with that environment for live development. | + +### When to Use Devbox + +* When you need fast feedback without redeploying to Kubernetes. +* When debugging complex issues that depend on real cluster state. +* When integrating or testing locally while keeping the rest of the system in-cluster. + +### Next Steps + +Ready to use Devbox? See [**Local Development with Devbox**](../how-to-guides/local-development-with-devbox.md) for setup instructions. + diff --git a/docs/core-concepts/devbox.mdx b/docs/core-concepts/devbox.mdx new file mode 100644 index 0000000..415c022 --- /dev/null +++ b/docs/core-concepts/devbox.mdx @@ -0,0 +1 @@ +export { default } from "./devbox.md"; diff --git a/docs/core-concepts/environment.md b/docs/core-concepts/environment.md new file mode 100644 index 0000000..a9b1608 --- /dev/null +++ b/docs/core-concepts/environment.md @@ -0,0 +1,140 @@ +# Environment + +The **Lapdev Kubernetes Environment** is the foundation of Lapdev’s platform. It’s where you **develop**, **test**, and **demo** your applications — all directly within Kubernetes. + +Lapdev’s unique strength is that it manages the **entire environment lifecycle** — from creation to synchronisation — in a seamless, automated way. 
You don’t need to manually manage YAML files, sync resources, or worry about configuration drift. + +### How It Works + +Environments are created from **App Catalogs**, which serve as blueprints for your application. + +When you create an environment: + +1. Select an existing App Catalog that defines your app's workloads +2. Choose an environment type (Personal, Shared, or Branch) +3. Lapdev deploys the workloads: + - Personal/Shared: into a dedicated namespace + - Branch: into the shared base environment's namespace +4. ConfigMaps, Secrets, and Services are automatically included + +The result: a fully functional copy of your app that mirrors production exactly, with zero manual YAML management. + +> App Catalogs are created by reading production manifests directly from your cluster. Learn more about [**App Catalogs**](app-catalog.md). + +### Environment Types + +Lapdev supports three environment types, depending on your workflow and cost requirements: + +* **Personal Environments** — fully isolated, ideal for independent development +* **Shared Environments** — team-wide baseline, ideal for integration testing +* **Branch Environments** — lightweight personal modifications on shared foundation, ideal for large teams + +All types support **Devbox** for local development and debugging. + +#### 1. Personal Environments + +A **Personal Environment** is a completely isolated workspace for a single developer. + +Each personal environment runs in its **own Kubernetes namespace**, so: + +* One developer’s work never affects another’s. +* Multiple environments can be created per developer (e.g., one per feature or bug fix). +* You can switch between environments freely. + +Because every personal environment contains a **full set of workloads**, it guarantees total isolation — but that comes with higher resource usage. 
+ +Lapdev helps mitigate this cost by allowing you to **pause and resume environments on demand**, automatically scaling down resources when you’re not using them. + +**Best for:**\ +Developers who need full isolation or want to test complex changes safely. + +#### 2. Shared Environments + +A **Shared Environment** is a team-wide baseline that runs a complete version of your app. + +* Created and managed by admins +* Accessible by all team members +* Acts as the foundation for branch environments +* Ideal for integration testing or staging setups + +**Best for:**\ +Teams who need a stable, shared environment for testing or as a baseline for branch environments. + +#### 3. Branch Environments + +**Branch Environments** provide a cost-effective way to test changes by building on top of a shared environment. + +When you create a branch environment: + +* Initially, it references all workloads from the shared environment +* Only when you modify a service does Lapdev create a branched copy +* The branched copy runs alongside the shared version inside the shared environment's namespace, and Lapdev handles routing between them +* Unmodified services continue using the shared environment +* Multiple developers can work simultaneously without conflicts + +This Git-like branching model means: + +* **Near-instant creation** - no waiting for full environment deployment +* **Minimal cost** - only run what you're changing (up to 88% infrastructure reduction) +* **Realistic testing** - test against real production-like services, not mocks + +**How it works:** + +Lapdev uses intelligent traffic routing to direct requests to your branched services while everything else uses the shared baseline. This enables multiple developers to safely test changes in parallel. 
+ +**Requirements:** + +* Services must communicate via HTTP/HTTPS +* Your application must forward `tracestate` headers in service calls +* Non-HTTP components (databases, message queues) remain shared + +**Best for:**\ +Large teams doing frequent feature development with cost efficiency in mind. + +> For technical details on routing, sidecar proxies, and header propagation, see [**Branch Environment Architecture**](architecture/branch-environment-architecture.md). + +### Local Development with Devbox + +All environment types support **Devbox**, Lapdev's CLI tool for local development. + +Devbox allows you to: + +* **Intercept cluster traffic** to your local machine for real-time debugging +* Use your **local IDE** to edit, build, and debug as if running in the cluster +* **Access in-cluster resources** (databases, caches, internal APIs) transparently + +When connected via Devbox, your local process integrates seamlessly with your environment. Other services automatically route traffic to your local instance, combining fast local iteration with accurate cluster integration. + +> Learn more about [**Devbox**](devbox.md) and how to use it. + +### Preview URLs + +Every Lapdev environment can expose one or more **Preview URLs** — public HTTPS endpoints for accessing or sharing your running services. + +Preview URLs are created **per service**. You choose which service to expose, and Lapdev automatically handles DNS, certificates, routing, and security. + +When requests come in through a Preview URL in a branch environment, Lapdev automatically manages the **`tracestate` header** to route traffic to the correct branch environment. + +> Learn more about [**Preview URLs**](preview-url.md). 
+ +### Comparison + +| Feature | Personal Environment | Shared Environment | Branch Environment | +| --------------- | ------------------------------------- | ------------------------------------ | ------------------------------- | +| **Isolation** | Fully isolated (per namespace) | Shared by all team members | Shared base, routed isolation | +| **Cost** | Highest | Medium | Lowest | +| **Ideal For** | Deep testing, multi-service debugging | Integration testing, team staging | Large teams, frequent branching | +| **Performance** | Slower to start, full workloads | Full workloads, shared infrastructure| Instant setup, minimal overhead | +| **Access** | Single developer | Entire team | Single developer (with shared base) | + +### Summary + +Lapdev Kubernetes Environments let you: + +* Reproduce production exactly, without manual setup +* Stay notified when production changes - sync with one click when you're ready +* Choose between **full isolation**, **team-wide sharing**, or **cost-efficient branching** +* Use **Devbox** for fast, local-style development in Kubernetes +* Collaborate efficiently with consistent, shareable environments + +Whether you're developing alone, testing as a team, or scaling across a large organization, Lapdev ensures every environment stays consistent, efficient, and production-accurate. diff --git a/docs/core-concepts/environment.mdx b/docs/core-concepts/environment.mdx new file mode 100644 index 0000000..78fdf5a --- /dev/null +++ b/docs/core-concepts/environment.mdx @@ -0,0 +1 @@ +export { default } from "./environment.md"; diff --git a/docs/core-concepts/preview-url.md b/docs/core-concepts/preview-url.md new file mode 100644 index 0000000..ac8f2e3 --- /dev/null +++ b/docs/core-concepts/preview-url.md @@ -0,0 +1,68 @@ +# Preview URL + +A **Preview URL** is a unique, automatically generated HTTPS endpoint that lets you access services in your Lapdev environment directly from the web — without any manual DNS or Ingress configuration. 
+ +You can create Preview URLs for any environment type (**personal**, **shared**, or **branch**), exposing specific services for access and sharing.\ +This makes it easy to preview changes, share work with teammates, and test production-like behavior in real time. + +### Why Preview URLs + +In traditional Kubernetes setups, exposing your app for testing often means: + +* Creating or editing Ingress rules +* Configuring DNS records +* Managing TLS certificates +* Waiting for ops to approve changes + +That’s slow, error-prone, and not scalable for dozens of developers or short-lived environments. + +Lapdev solves this with **automatic Preview URLs** — secure, per-environment endpoints that “just work.” + +### How It Works + +When you create a Preview URL for a service in your environment, Lapdev: + +1. Detects the service you want to expose (e.g. `frontend`, `api`, `gateway`) +2. Automatically generates a unique HTTPS domain for that service +3. Configures TLS certificates automatically — no `cert-manager`, DNS setup, or manual YAML needed +4. Routes traffic through Lapdev's managed proxy layer directly to your service inside the cluster + +All routing and TLS termination are handled by Lapdev's control plane, so your cluster stays secure and simple. + +Each Preview URL is unique and automatically managed by Lapdev, making it safe to share with your team. + +### Access Control + +Preview URLs default to **Organization** access, but you can configure per Preview URL: + +* **Organization (default):** Only members of your Lapdev organization can access (after login) +* **Public:** Anyone with the link can access without authentication + +Access settings are managed per Preview URL in the Lapdev dashboard. 
+ +### Benefits + +✅ **Instant access:** No waiting for ops or configuring ingress.\ +✅ **Secure by default:** Auto-managed HTTPS and certificates.\ +✅ **Shareable:** Send links to PMs, QA, or teammates easily.\ +✅ **Isolated:** Each environment’s URL maps only to its own workloads.\ +✅ **Consistent:** Same domain and routing system across all environments. + +### How It Relates to Other Lapdev Components + +| Component | Role | +| --------------- | --------------------------------------------------------------------------- | +| **App Catalog** | Defines which workloads are part of the app. | +| **Environment** | A running instance of the app in Kubernetes. | +| **Preview URL** | Provides web access to that environment — with automatic routing and HTTPS. | + +### Usage Pattern + +Preview URLs make collaboration effortless: + +* Create an environment and expose the services you want to share +* Lapdev generates secure HTTPS URLs automatically +* Share URLs with teammates, QA, or stakeholders instantly +* No infrastructure changes, ingress configuration, or firewall rules needed + +> For step-by-step instructions, see [**Use Preview URLs**](../how-to-guides/use-preview-urls.md). 
diff --git a/docs/core-concepts/preview-url.mdx b/docs/core-concepts/preview-url.mdx new file mode 100644 index 0000000..5bb3d59 --- /dev/null +++ b/docs/core-concepts/preview-url.mdx @@ -0,0 +1 @@ +export { default } from "./preview-url.md"; diff --git a/docs/docs.json b/docs/docs.json new file mode 100644 index 0000000..49e93b9 --- /dev/null +++ b/docs/docs.json @@ -0,0 +1,44 @@ +{ + "$schema": "https://mintlify.com/docs.json", + "name": "Lapdev Docs", + "description": "Guides, how-tos, and core concepts for operating Lapdev developer environments on Kubernetes.", + "theme": "maple", + "colors": { + "primary": "#3B82F6", + "light": "#F8FAFC", + "dark": "#0F172A" + }, + "navigation": { + "groups": [ + { + "group": "Introduction", + "pages": [ + "index" + ] + }, + { + "group": "How-to guides", + "pages": [ + "how-to-guides/connect-your-kubernetes-cluster", + "how-to-guides/create-an-app-catalog", + "how-to-guides/create-lapdev-environment", + "how-to-guides/local-development-with-devbox", + "how-to-guides/use-preview-urls" + ] + }, + { + "group": "Core concepts", + "pages": [ + "core-concepts/architecture/index", + "core-concepts/architecture/traffic-routing-architecture", + "core-concepts/architecture/branch-environment-architecture", + "core-concepts/cluster", + "core-concepts/app-catalog", + "core-concepts/environment", + "core-concepts/devbox", + "core-concepts/preview-url" + ] + } + ] + } +} \ No newline at end of file diff --git a/docs/how-to-guides/connect-your-kubernetes-cluster.md b/docs/how-to-guides/connect-your-kubernetes-cluster.md new file mode 100644 index 0000000..9f49bb6 --- /dev/null +++ b/docs/how-to-guides/connect-your-kubernetes-cluster.md @@ -0,0 +1,66 @@ +# Connect Your Kubernetes Cluster + +This guide walks you through connecting your Kubernetes cluster to **Lapdev**, so Lapdev can manage development environments inside your cluster. 
+ +> Read [**Cluster**](../core-concepts/cluster.md) to understand cluster roles, permissions, and how kube-manager works. + +### Create a Cluster in the Lapdev Dashboard + +1. Go to the Lapdev dashboard: [https://app.lap.dev](https://app.lap.dev) +2. Navigate to **Clusters** → click **Create New Cluster**. +3. Enter a **name** to identify your cluster (e.g. `staging-cluster` or `dev-cluster`). +4. After creating it, you’ll see an **authentication token** and **installation instructions** for your cluster. + +Keep this token handy — it’s used to securely register your cluster with Lapdev. + +

     + +### Install the Lapdev Kube Manager + +Run the install command shown in the dashboard, or apply the manifest manually: + +```bash +kubectl apply -f "https://get.lap.dev/install/lapdev-kube-manager.yaml?token=<YOUR_TOKEN>" +``` + +Replace `<YOUR_TOKEN>` with the token generated when you created the cluster. The manifest creates the `lapdev` namespace (if needed), configures the required RBAC, and deploys the `lapdev-kube-manager` controller that securely connects your cluster to Lapdev. + +### Configure Cluster Permissions + +After the cluster is connected, configure which environment types can be deployed to this cluster: + +1. Go to the cluster details page in the dashboard +2. Find the **Permissions** section +3. Toggle permissions based on your needs: + * **Personal Environments** - Allow developers to create isolated environments + * **Shared Environments** - Allow team-wide baseline and branch environments + +You can change these settings at any time. + +> Learn more about cluster permissions and use cases in [**Cluster**](../core-concepts/cluster.md). + +### Verify Connection + +Once the installation is complete: + +* Go back to the **Clusters** page in the Lapdev dashboard. +* Your cluster should appear in the list with a status of **Active**. + +If the status doesn’t update after a minute, double-check that: + +* The `lapdev-kube-manager` pod is running: + + ```bash + kubectl get pods -n lapdev + ``` +* Your network allows outbound HTTPS connections to `api.lap.dev`. + +### Next Steps + +Your cluster is now connected to Lapdev! 
🎉 + +You can start: + +* Creating an [App Catalog](create-an-app-catalog.md) from your cluster's workloads +* Creating [Environments](create-lapdev-environment.md) from your App Catalog +* Learn more about [Cluster concepts](../core-concepts/cluster.md) and architecture diff --git a/docs/how-to-guides/connect-your-kubernetes-cluster.mdx b/docs/how-to-guides/connect-your-kubernetes-cluster.mdx new file mode 100644 index 0000000..70a755c --- /dev/null +++ b/docs/how-to-guides/connect-your-kubernetes-cluster.mdx @@ -0,0 +1 @@ +export { default } from "./connect-your-kubernetes-cluster.md"; diff --git a/docs/how-to-guides/create-an-app-catalog.md b/docs/how-to-guides/create-an-app-catalog.md new file mode 100644 index 0000000..14e091b --- /dev/null +++ b/docs/how-to-guides/create-an-app-catalog.md @@ -0,0 +1,74 @@ +# Create an App Catalog + +An **App Catalog** defines which workloads make up your application — it's the blueprint Lapdev uses to create development environments. + +This guide walks you through creating an App Catalog from your connected cluster. + +> To understand why App Catalogs exist and how they fit into Lapdev, read [**App Catalog**](../core-concepts/app-catalog.md) first. + +### Prerequisites + +Before creating an App Catalog, make sure: + +* You've connected at least one **Kubernetes cluster** to Lapdev. +* The cluster you'll read workloads from shows as **Active** in the Lapdev dashboard. +* The cluster contains the workloads you want to include (e.g. your production or staging workloads). + +> Don't have a cluster connected yet? See [**Connect Your Kubernetes Cluster**](connect-your-kubernetes-cluster.md). + +> 💡 The same cluster can be used later for both reading workloads **and** deploying environments. + +### Open the Cluster in the Dashboard + +1. Go to [https://app.lap.dev](https://app.lap.dev). +2. Navigate to the **Clusters** tab. +3. Click the cluster you want to use as the **source**. 
+ +### Select Workloads + +Once inside the cluster details page: + +1. Lapdev lists all **workloads** (Deployments, StatefulSets, DaemonSets, etc.) discovered in the cluster. +2. You can: + * Filter workloads by **Namespace** or **Workload Type**. + * Optionally check **Show System Workloads** to include system components. +3. Select the workloads that make up your app. + +
    + + + +### Create the App Catalog + +1. After selecting workloads, click **Create App Catalog**. +2. In the dialog: + * Enter a **Name** for the catalog (e.g. `checkout-service`, `internal-tools`). + * Optionally add a **Description**. + * Review the list of selected workloads. +3. Click **Create**. + +
    + +Lapdev will create the catalog and register it in the **App Catalogs** section of your dashboard. + +> 💡 Don’t worry if you missed something — you can edit the catalog later to add or remove workloads. + +### Verify Your App Catalog + +1. Go to the **App Catalogs** tab in the dashboard. +2. You’ll see your newly created catalog listed with: + * The **name** and **description** + * The **number of workloads** included + * The **source cluster** + +Click a catalog name to view its details. + +_Example screenshot:_ + +### Next Steps + +Once your App Catalog is ready, you can: + +* Create a [Lapdev Environment](create-lapdev-environment.md) from it +* Learn about [Environment types](../core-concepts/environment.md) +* Understand [Cluster roles](../core-concepts/cluster.md) for multi-cluster setups diff --git a/docs/how-to-guides/create-an-app-catalog.mdx b/docs/how-to-guides/create-an-app-catalog.mdx new file mode 100644 index 0000000..2bd5ece --- /dev/null +++ b/docs/how-to-guides/create-an-app-catalog.mdx @@ -0,0 +1 @@ +export { default } from "./create-an-app-catalog.md"; diff --git a/docs/how-to-guides/create-lapdev-environment.md b/docs/how-to-guides/create-lapdev-environment.md new file mode 100644 index 0000000..2a0e731 --- /dev/null +++ b/docs/how-to-guides/create-lapdev-environment.md @@ -0,0 +1,76 @@ +# Create Lapdev Environment + +A **Lapdev Environment** is a running instance of your app inside a Kubernetes cluster. + +It's created from an existing **App Catalog**, which defines which workloads make up your application. + +This guide shows how to create **personal** and **shared** environments from an App Catalog, and how to create **branch** environments from a shared environment. + +> Read [**Environment**](../core-concepts/environment.md) to understand environment types and how they work. + +### Prerequisites + +Before creating an environment: + +* You must have at least one **connected Kubernetes cluster** (Active in the Lapdev dashboard). 
+* You must have an existing **App Catalog** that defines your app's workloads. + + > If you haven't created one yet, see [**Create an App Catalog**](create-an-app-catalog.md). + +### Start from an App Catalog (Personal or Shared) + +1. Go to the **App Catalogs** tab in the Lapdev dashboard. +2. Select the catalog you want to use as the base for your environment. +3. Click **Create Environment**. + +
    + +### Select Environment Type (Personal or Shared) + +Lapdev supports three environment types depending on your workflow and cost needs. When you create an environment from an App Catalog you can choose between: + +#### **Personal Environment** + +* A fully isolated copy of your app, deployed into a per-developer namespace. +* Ideal for local testing, debugging, or experimentation without affecting others. + +#### **Shared Environment** + +* A single shared version of your app that multiple developers can access. +* Useful for integration testing or a team staging setup. + +Branch environments are created from an existing shared environment. See the next section for that workflow. + +### Create a Branch Environment + +Branch environments reuse an existing shared environment as their baseline. + +1. Go to the **Environments** tab in the Lapdev dashboard. +2. Open the shared environment you want to branch from. +3. Click **Create Branch Environment**. +4. Select the services you plan to modify and provide a name/description for the branch. +5. Click **Create**. + +Only the services you modify are duplicated alongside the shared workloads in the same namespace; everything else continues to run on the shared versions. + +> 🧠 Use **branch environments** for feature work — they're lightweight, fast to spin up, and cost-efficient. + +### Verify and Access Your Environment + +After creation: + +* The environment will appear in the **Environments** list. +* Lapdev automatically provisions: + * All workloads from the App Catalog + * Associated ConfigMaps, Secrets, and Services + * Kubernetes namespace and networking + +You can monitor status, logs, and sync state directly from the dashboard. + +### Next Steps + +Your environment is ready! 
 You can now: + +* Create [Preview URLs](use-preview-urls.md) for HTTPS access to your services +* Use [Devbox](local-development-with-devbox.md) to connect locally for live debugging +* Learn more about [Environments](../core-concepts/environment.md) diff --git a/docs/how-to-guides/create-lapdev-environment.mdx b/docs/how-to-guides/create-lapdev-environment.mdx new file mode 100644 index 0000000..873ac0a --- /dev/null +++ b/docs/how-to-guides/create-lapdev-environment.mdx @@ -0,0 +1 @@ +export { default } from "./create-lapdev-environment.md"; diff --git a/docs/how-to-guides/local-development-with-devbox.md b/docs/how-to-guides/local-development-with-devbox.md new file mode 100644 index 0000000..9996bd0 --- /dev/null +++ b/docs/how-to-guides/local-development-with-devbox.md @@ -0,0 +1,87 @@ +# Local Development With Devbox + +This guide shows how to set up and use **Devbox** for local development with your Lapdev environments. + +> Read [**Devbox**](../core-concepts/devbox.md) to understand what it does and when to use it. + +### Prerequisites + +Before using Devbox, you need: + +* **Lapdev CLI installed** - Devbox is built into the `lapdev` command +* **An active Lapdev environment** - Personal, shared, or branch environment + +### Install the Lapdev CLI + +Run one of the following commands based on your operating system. + +#### Linux or macOS + +```bash +curl -fsSL https://get.lap.dev/lapdev-cli.sh | bash +``` + +This script detects your architecture, downloads the latest release, and places the `lapdev` binary in `/usr/local/bin` (or `~/.local/bin` if needed). Re-run it at any time to upgrade or pass `--version <version>` to pin a specific build. + +#### Windows PowerShell + +```powershell +irm https://get.lap.dev/lapdev-cli.ps1 | iex +``` + +Run the command in an elevated PowerShell window if your execution policy requires it. The installer copies `lapdev.exe` into `%LOCALAPPDATA%\Lapdev\bin` and appends that directory to your user `PATH`. 
Restart your shell (or VS Code terminal) after installation so the new path is picked up. + +After installing, verify everything is ready with: + +```bash +lapdev --version +``` + +### Connect to Lapdev + +First, connect your Devbox CLI to Lapdev: + +```bash +lapdev devbox connect +``` + +This establishes a secure connection between your local machine and Lapdev. + +### Set Active Environment + +After connecting, set which environment you want to work with in the Lapdev dashboard: + +1. Go to the **Environments** tab +2. Select the environment you want to use +3. Click **Set as Active Environment** + +Once set, all traffic interception and service access will route through this active environment. + +### Intercept a Service + +Once connected, you can intercept traffic for a specific workload through the Lapdev dashboard: + +1. Open your environment in the Lapdev dashboard +2. Stay on (or switch to) the **Workloads** tab +3. Locate the workload that owns the service you want to intercept (e.g., `checkout-service`) +4. Click **Start Intercept** inside that workload’s row + +Lapdev automatically mirrors each container port, reusing any overrides from your most recent intercept. If you need custom local ports after starting the intercept, use the **Edit Ports** button on the intercept card and update the mappings there. + +
    + +After enabling interception: + +* All cluster traffic for that service routes to your local machine +* You can make edits, hot-reload, or debug directly in your IDE +* Other services in the cluster remain unaffected + +### Access In-Cluster Services + +While connected via Devbox, you can access any service in your environment using their cluster DNS names (e.g., `postgres-service:5432`, `redis-service:6379`) directly from your local code. Devbox handles the tunneling automatically — no port forwarding or VPN needed. + +### Next Steps + +* Learn more about [Devbox architecture](../core-concepts/devbox.md) +* Understand [traffic routing](../core-concepts/architecture/traffic-routing-architecture.md) +* Create [preview URLs](use-preview-urls.md) to share your work diff --git a/docs/how-to-guides/local-development-with-devbox.mdx b/docs/how-to-guides/local-development-with-devbox.mdx new file mode 100644 index 0000000..c259579 --- /dev/null +++ b/docs/how-to-guides/local-development-with-devbox.mdx @@ -0,0 +1 @@ +export { default } from "./local-development-with-devbox.md"; diff --git a/docs/how-to-guides/use-preview-urls.md b/docs/how-to-guides/use-preview-urls.md new file mode 100644 index 0000000..25fe832 --- /dev/null +++ b/docs/how-to-guides/use-preview-urls.md @@ -0,0 +1,102 @@ +# Use Preview URLs + +Lapdev lets you create **Preview URLs** for your environments so you can securely access and share running services — without setting up DNS, ingress, or TLS certificates manually. + +Each Preview URL points to a specific **service** inside your environment (for example, your `frontend`, `api-gateway`, or `admin` service).\ +You can create multiple Preview URLs per environment, each targeting a different service. + +> Read [**Preview URL**](../core-concepts/preview-url.md) to understand what they are and why they're useful. + +### Prerequisites + +Before you begin: + +* You must have at least one **active environment** in Lapdev (personal, shared, or branch). 
+* The environment must show a status of **Active** in the Lapdev dashboard. +* Your app must expose at least one **Service** in Kubernetes. + +### Create a Preview URL + +1. Open the Lapdev dashboard: [https://app.lap.dev](https://app.lap.dev) +2. Go to the **Environments** tab. +3. Select the environment you want to create a preview for. +4. In the environment details page, scroll to the **Preview URLs** section. +5. Click **Create Preview URL**. + + _Example screenshot:_\\ +6. In the **Create Preview URL** dialog: + * Choose the **Service** you want this URL to point to (for example, `frontend`, `api-gateway`, or `admin`). + * Optionally, enter a **Description** (e.g., “Frontend QA demo”). + * Select the **Access Level** (`Organization` by default, or `Public`). +7. Click **Create**. + +
    + +Lapdev will: + +* Automatically generate a unique HTTPS domain for the selected service +* Handle routing, DNS, and TLS certificates for that URL +* Route traffic directly to the selected service inside your environment + +### View and Open Preview URLs + +Once created: + +* All Preview URLs for the environment appear in the **Preview URLs** section. +* Each entry lists: + * The **Service name** it targets + * The **URL** itself + * The **Access level** (`Organization` or `Public`) + +Click the URL to open it in your browser. + +### Share the Preview URL + +You can safely share a Preview URL with: + +* Teammates or QA engineers for quick testing +* PMs or designers for feature reviews +* Automated test systems for integration runs + +Just copy and share the link — no VPN, firewall, or cluster access needed. + +> 💡 Tip: You can create multiple Preview URLs in the same environment if your app exposes multiple services (e.g., `frontend`, `admin`, `gateway`). + +### Manage Access Control + +Each Preview URL has its own access policy. + +To update it: + +1. In the **Preview URLs** section, click the settings icon next to a URL. +2. Choose who can access it: + * **Organization (recommended, default):** Only authenticated Lapdev users can view. + * **Public:** Anyone with the link can view. + * _(Coming soon)_ **Custom rules** for organization-level access. +3. Click **Save**. + +> 🔒 Use organization access for internal branches or unreleased features. + +### Delete a Preview URL + +To remove a Preview URL: + +1. Go to the **Preview URLs** section of your environment. +2. Click the **Delete** icon next to the URL. +3. Confirm deletion. + +Deleting a Preview URL does **not** affect the environment or its workloads — it only removes that specific public endpoint. 
+ +### Troubleshooting + +| Issue | Possible Cause | Solution | +| ------------------------ | ----------------------------------------- | -------------------------------------------------------- | +| Service not listed | The service has no exposed port | Check that the Kubernetes Service defines a valid `port` | +| Preview URL doesn’t open | Service not ready or endpoint unreachable | Verify pods and services are running | +| HTTPS warning | Certificate still propagating | Wait 30–60 seconds; Lapdev handles TLS automatically | + +### Next Steps + +* Learn more about [Preview URLs](../core-concepts/preview-url.md) and how they work internally +* Use [Devbox](local-development-with-devbox.md) for real-time debugging connected to your environment +* Explore [App Catalogs](create-an-app-catalog.md) to define which workloads appear in your environments diff --git a/docs/how-to-guides/use-preview-urls.mdx b/docs/how-to-guides/use-preview-urls.mdx new file mode 100644 index 0000000..b1a21f3 --- /dev/null +++ b/docs/how-to-guides/use-preview-urls.mdx @@ -0,0 +1 @@ +export { default } from "./use-preview-urls.md"; diff --git a/docs/images/architecture.svg b/docs/images/architecture.svg new file mode 100644 index 0000000..331d63b --- /dev/null +++ b/docs/images/architecture.svg @@ -0,0 +1,8 @@ + + + 
eyJ2ZXJzaW9uIjoiMSIsImVuY29kaW5nIjoiYnN0cmluZyIsImNvbXByZXNzZWQiOnRydWUsImVuY29kZWQiOiJ4nO1921LjWLbte39FRu7H7vReN936xH4w2Fx0XCKRnFx1MDAwNlx1MDAxYjAnOjJs2Vxi3ylssKVcdTAwMWT972eOuVx1MDAwNDYkJoFcdTAwMDSK7tNVUWVb0rrPMeZlzSX+9y+fPn2eZ1x1MDAxN73Pf//0ubdM2qN+97K9+Pw3XFy/7l3O+tNcdN1S/Hs2vbpM+Mnz+fxi9vf//u/2xUUp7c870+mwlEzHtlhv1Fx1MDAxYvcm81x1MDAxOT34f+n3p0//y/9fa6g/bqc9fpgvr9pxlLl/NZ5OuE3pO4F0fOm5t0/0Z1x1MDAxNWpr3uvS7bP2aNZb3cGlz9W41SnLpHvZaF5e1HeHnerXRWPV7Fl/NDqcZyPu0mxKI1/dm80vp8Pecb87P79cdTAwMTn+2vVNpS6nV+n5pDfD0OXt1elFO+nPM1xcXHUwMDEz4vZqe5JyXHUwMDFkqytLPCFdUXJV4CvluDJwhXd7XHUwMDFiXHUwMDE1eFqUXHUwMDAy41x1MDAxODdQwtPa9+91bHs6ml6iY/PL9mR20b6kdVh1r9NOhin1cdJ9/LlFMWyjSs7txfNePz2f37866/Hs+472XFzfMavVQztcdTAwMTdhl4XgXHUwMDFmqym/bI97IUpMrkaj9XmbdIt5u3Ojg1x1MDAxYtU1eVpVdXXRbdulJ5FcdTAwMTBGXHUwMDA3nudcdTAwMDTOaqVG/cnwfnWjaTJ8QFpm8/b8XG7Vf77oTbr9SXpHRmx/P7uiIzw36VxiY4Rvek7Q80xXdYw88410gm7S6/ZEt7suQ4Sm3i1cYvDPl1uh+PRJXHUwMDE23/7Bn//828NIuewlcyspXHUwMDBmoEW6ciNagiCQytfCfzJawt1j9eV67+zotJxcdTAwMWTNK4E8/tGS74VcdTAwMTbxMrSIQJaMJ1XgXHUwMDEyNVxiXHUwMDEzmDtoMZ4pKS/wffpPe56437FbXHUwMDE0/Jfs4d+XI8V1XHUwMDAzwiW15PuBUUL5P8NGXHUwMDFiU9La8YzWgXD1T1x1MDAxMFxuXGI/Qiklflx1MDAxZkL/eytkN3Kkiyv/fFx1MDAwMbKMp5XytHxcdTAwMDayXHUwMDFlXHUwMDE16XlvOX9YmvUmaVbad6TWUj1ZmFx1MDAwN85g+cd0/r2uXHUwMDE3c+2Yqdjd82ZcdTAwMWad+oncjevRP1JcdTAwMTChundkmditRIAmveC6jvLU28kygaqkpWuEq1x1MDAxZWB/pe9cdTAwMGKu9Fx1MDAxY1x1MDAwN332/zXJ/2w6mVx1MDAxZvZzXHUwMDE2M3Hn6tf2uD/K7shcdTAwMDDLLk1giyyhT9ujq9m8d/n5zt3yqJ9Cmj+Pemd3xXzeJ31we3s+vVjdTai1dn/Su/x5dqaX/bQ/aY9cdTAwMWGPtkzj7e3eLJEsSWdt9Wc93GWd8HJVo9bI81x1MDAxZTh911x1MDAxM1x1MDAxZSnF1eT9XG6c41x1MDAxZvPUnU6Xrpr90c6c5VkjXHLMq4Kz256d915cdTAwMTedRIMl7flkhbpkfml/hYOMXHJXt+RcdTAwMDa+o0TgOtKR5l7PXlx1MDAwZp7KiJI2jpZcdTAwMDFJvGNcdTAwMDK9aupcdTAwMTakzoo7XG6QKmKXQChvXHIj76tebsusSm/SXHRrklL5Y28m00p4eP3Hl8Ghic5GX5yjz7fP/fPGhnpFYniZ7lxu3E3wIGkh1pZrjs2v4PHwoD84PFx1MDAxYyFLvlwifLgumVx1MDAwYvJcdTAwMWU6jC7Jd0GH9HWJLFx1MDAxNuU6T9ReOvBdstnMK5hdL1ReRCyOXHUwMDE0UjxDRn9Pee23L7q960/VyXX/cjpcdTAwMTnfmcc7Kiyh
W+ta5iclNu53u+v64q5cdTAwMWX7XHUwMDE1z99XbY/16zVcdTAwMTTcxqiD1JuVm9FGudI8XHUwMDFkvcGXi6P2WM8rsalO27ODpOx/vf7glqcybklcdTAwMWGh4JtcdTAwMThf+avZRVx1MDAwNa5cZlxiva4naSqU9EywXHS9r1x1MDAxMnRQXHUwMDBm6LO1azf6zCNcdTAwMWTsiDX3dlx1MDAxZLYrXHUwMDA15Z5cdTAwMWRcdTAwMWNcdTAwMGac9ld3q7NcdTAwMTVMx9/PJ9HB1zVN83m4uChvl0W/VWv+aGxccvZcdTAwMDb9Tv/kRsN8dNv1SYFcdTAwMGLZ65BcdTAwMDcklfSpXHUwMDE1r6PIrjeJXG46XGJcdTAwMTAkriRfI5Gu53iPXHUwMDA1Lp5cdTAwMWS32KgovY1Qk+Q2XHUwMDEzXHUwMDFiK/N0QzLxut/Sy9p5a8/Zalx1MDAxZXcvv+59XHTiXHUwMDBmjjXpeOTIXHSPXHUwMDE2xDyANVx1MDAxM5RcXDhTXHUwMDA0tEex9vt2pDAlx1x1MDAxN8Lz5dM0pZHGNZ5r/v+G3G9o3C/frjq9L1F7Qmronb3GRzvw9s6j2Vx1MDAxOKd0vMBox/eeXHUwMDFl2SlcdTAwMDcnf/xIx071u5+fqeOLef9q4X5065gg9piCXHUwMDE1qlx1MDAxNJgnKNjfXHUwMDA3ves/XHUwMDE00vfpqlj7ZzXKm1x1MDAxOI/rONS9YIP7+Fx1MDAxNth+RS/TXG7OzunZt+ZwtPyRnGyVvcnRwfeeXHUwMDE2d53OXHUwMDFiaW5fXk5cdTAwMTd/opu50VJ118B5X32Sh++Ss/lcZij1T+PyMD+efPtcdTAwMTF/6ybLxTxRV95cdTAwMDeHXHUwMDEyuWol3oFcdEyglXPXz3SlKFx1MDAxOWWU8KVPXCK7tj3y51mqUpD15Srqz5/mYWoy9Vx1MDAxYyPMM8TzeSZmoNq+L3s6IMuSzFx1MDAxNm2wXHUwMDBmon0jPNfxzno9X/WM13OSVzUxLUhcdTAwMWayMcUjKDE+mV3SPMPIfJw3Xlx1MDAwNSXT+XxcdTAwMTNKXrYxRkZdyVx1MDAxMKUjLENcdTAwMDb/fZhcdTAwMTin5GtidEVcdTAwMWHJl3LjLvJv61x1MDAxYq3IdTRKKk3emu9cdTAwMWHzgPJcdTAwMTEll1x1MDAxMOtcdTAwMTJkXHUwMDFkJV3hXHUwMDA33k9cdTAwMTByfENcdTAwMTax0W9cdTAwMTK8VL9WK48hS/u+cF6IrMv5Vt+C6k7Hilx1MDAwNFxui62rpNlo/Jh6leQ6XsT+8lvli9Hr5vTZNGGAflx1MDAxMSXhXHUwMDA1Tlx1MDAxMHiavFx1MDAwYlc5a1x1MDAxYp6YsfZcdTAwMDXos+RcdTAwMTFcdTAwMTP4nvDId9ZGOj9ccp5Q/utOPW6ErXWK+iRcdTAwMDJXa8dcdTAwMTVk+viG+OCnPklyREhcZoUxxvVcdTAwMDPf1z8r+lF7Nt+ejsd9XHUwMDAw5fu0P5nfn2SezTIo4bzX7lrB7N+zSGlov3jiXHUwMDAyVd+1JFbfPq1Axj9uv//jb1x1MDAwZj69Wfi5+E9iv6rvSfy3ycU2cmNcdTAwMGVccklGQGTwjGhW5H6b++Xj6kQ5h2F7vj9cdTAwMWbV5CubXGKvTn6+0SXf08ZcdTAwMGJI4khcdTAwMDOtzGkmP4dM8UBIXyuaXHLfde917Fx1MDAxNY1trUvkMD81XHUwMDEy7VxirYSW7sP+9b+lX3zYS64ue5+Oe51cdTAwMTlqnX9qXFxNJr3R+/rGv+zEm8aeld7oXHUwMDFiI0CkXHUwMDAystuensNzcprLSznJrvy037vezkeLy2T3g8PVI/7XJvCFNkJcdTAwMGLh3M178Fx1MDAxZFNyiUVJtZF3TGblRzDp
XHUwMDE1kaiCXHUwMDFl/YU3PFx1MDAxZY1GX05221dX/rR+Xjs72W90zt4g0nXb4lx1MDAwM26vd3SuZo1cdTAwMWZcdTAwMTdcdTAwMTdbjezs4nQ+PCt/n7yW2+t7Snkv2rl6Wuj6LKE1d1x1MDAwNVx0QNLxtON1XFw3kYFr2m1y8cjdTbq+clx1MDAwM/9dQtdkf270KsjM1sJcdTAwMDTBM1x1MDAxMpRazajd9p3R13Nv1PrWb81cdTAwMWHb3z84Ul1k2/luoFx1MDAwMo/Up7vmNzBSXHUwMDExRdJCkVdBWlVcdTAwMDVvXHUwMDE3xXK90lOD1tLBZrSU4uHkpPeH6q/A9Jw0u99TvpXedWe6fF9de7/NN1at/v2rq/RY0IpWT49cdTAwMDJsJb1l7WrPXHUwMDFj1b5cXH+b7M+2+m2388Hx6lx0t+SRoyc0kmDJJbyDV61USWlPXHUwMDE45ZFfpjcnZbynZvWNY9RGT39cdTAwMDXXva1JPdBnw1rim/P5nin3+u0v71xyVz9w3jCm1lVnXG45zW2/XHUwMDFkiG631+m6Z4F0O1x1MDAwZV3xvXbP6Z5pfea+j+5TXHUwMDFi85tcdTAwMWPjXHTPo54+XHUwMDE5SmN9OTtcdTAwMTTdtL1/rPZcdTAwMDZuzzvo6VdO/3t91eeqkkPaTbpaXHUwMDE5T8q7Rqp2XGJoZFx1MDAwNDhcdTAwMWWpPZ9swzdMcJJINFxmlPS9pylA0n5cIkCG/Fx1MDAwN0HUY7bqodfUP755XHUwMDE3rYOGl87yvis6w8rr2aovxeuL1Ov3y951v7f41DzYf19cdTAwMWT7YMNvv8e7mSE8zyezcH1cdTAwMDPkV1xmkXeal4vysTtcdTAwMWN/c7e3u1tqclx1MDAxY77uwa03Slx1MDAxMHY9Q/DX2OFVd09uuZKMVkNgXHUwMDA0i1x1MDAwNN5cdTAwMWJu8b4oP9hR9KznrrH4v0J6cG1aPYv2w17/MOiGf/T3q2l53P6A6cFKPeI6XHUwMDFh43r0v6fHZFx1MDAxZlx1MDAxZfVcdTAwMDdHhyNUSSpslmiXnDfv7kktVyNcdTAwMDb0XHUwMDBl6Hh2frBSvkc4eo28+d/IXHUwMDBmNka4z1x1MDAxMNLf01xc75ZcdTAwMWb8XHUwMDBimn/v/ODH9ZuzcU/Z+K5RbvCMg8nzo8VhO/le1Zfellue7e1VnbODj45g4/klj0xb4brG97x7XGKGrymIyJTCPoZa2+j9XHUwMDE4XG4ukIboZ1NcdTAwMWPooyq4y2y3fT5uXHUwMDFlbu+eXsz6Xn9a+yNxnq/g9DpO3kTBbUaH73NcdTAwMWWI+/RNjIdcdTAwMDf9wdHhXGKvJH1ptCZdJlx1MDAxY3U31uIpclx1MDAxZt9cdTAwMDVcdTAwMWTPVnAyQHSUjL8/V8O5UnrPkNJ/XHUwMDExXHL3XHUwMDBiov8wJ2B8f2NaviekJmF6xvHOQfI1Md5hbTasd3dcdTAwMWHLP4am+fWDv3bDXHUwMDA0QclcdTAwMGZ0ILSnccTau5uga6QoeY7rKq209lx1MDAxY+9+x175tVx1MDAxYo4sXHUwMDE5TXYn0UggXeE+9Fx1MDAxMo7Nz9zi2vGEoVtcdTAwMGbCeqXC5Ner7SP/7Mf+Wbef6+Z5XtGmup7K04xcdTAwMGbi/GRvf3LcX5x+nYzNNFx1MDAwZWvvXHUwMDFjajXIXHUwMDFjelx0PTxtm9FtXHUwMDBi4Vx1MDAwN0K1zVlcdTAwMTJ0z4x/XHUwMDE2XGK/51xi6Xe6QnaM1zZtkVx1MDAwNK9cdTAwMWFq3Zzj+8hpNEfh9KTzdEtz2qt9/5IsrnVleFo/d5ZcdTAwMTezk/aPj41FT6qSMJ6RUipXeuruLqNcdTAwMTP4JSPoMl6kodaNvz/xMJpcdMj89X95MuZcXIyHji/T
ndnhdPd01tqbtGe1dah9XHUwMDE4LD5srb5HkFa5Jni7hFx1MDAwMtVBmqHSQnddo7tJW5N1qvWZXHUwMDE3nHW6XHSZTF6v2yNb9V02VdzNSldcbuqW8vTTgf54muiHXHUwMDA0uuuZUlx1MDAxMLieXGLIXHUwMDA2XHL8e+lcdTAwMDSuxpFcdTAwMTlHaun5nv92WXrBM/dTNJn3nr8pnf/fXHLsv3Fo5m+P1fu2WUkuKcn3S6Qof1x1MDAwZj9cdTAwMWT2Lq/f+1DfQ+2+hpewka/kZr5y8HK+Z53jW+xVTs7Sy+Nv4lp9OT/Ls9F85n9swnJ8UVwi4PsyUIp0yF1cdTAwMTffJcNcdTAwMDRv7fNcdTAwMDLX90Fpb8ZYrlNypaO00U+jLENcdTAwMTRKvf1VXHUwMDA21Ifho9eMZv1cdTAwMWWyrT/+vqi+3+bbpki5XHUwMDFid22lXHUwMDE2UrqBI56+MaV6mfnj+2T03feS/PtVbVx1MDAxMv043fvgiVx1MDAxZE5gSlxup1x1MDAwMXyhfc+T9/alXFxd8oVcdTAwMTEqIFx1MDAxMLmPnJR6XHUwMDE1b0PLkuuRh+dcdTAwMTjCtmPWX1x1MDAwMbd6jeDmZ25cdTAwMWR/XHUwMDFjMd544PDfXHUwMDA0z08y94U5U3g9WffszD3rkumvXHUwMDEyst7MmUy6Z/oskV3dXHUwMDEzSS943Vx1MDAxY6rN51x1MDAxMlx1MDAxZH/zNjD5uVx1MDAwMXUweLrF/7hcdTAwMWb2MdGGODiCzb52nUCsv1aHfXvfK5FcdTAwMGbmKrKxSWGtZVC8tlxuxVt0/UAr31x1MDAxMFikUsFcdTAwMDOKVFx1MDAwNk7JJ3WuXHUwMDEx1veIIH5cdTAwMGWxKcd3XHUwMDA0Xn37INL+1HOJnO30ludcdTAwMTJcdTAwMWaPLH26cy5RekL5yiFPziXfKVj5vZ9uz1x1MDAwMJJlpVx1MDAwMVx1MDAwMIG31NFDP1xy/knnXHUwMDEyXHUwMDFmTy282ykhpPak52hpXHUwMDAyRdy+ylxyW/VKllx1MDAwMj9w8VpcdTAwMDasvyN/XpN/yZOJXzbLP9/+WfRXNf4mXHUwMDA3up53//LteSdiXHUwMDA3Xzwn6PG4XHUwMDE3+VEp0JSMI1x1MDAxZG1cdTAwMWOPmHDNT+VDXHUwMDE0XHUwMDFhL199XHUwMDFmXG6Uvlx1MDAxN1xiXHUwMDAxWDpG+c5aNGrFgZ4o6ZUgmPVXKN+83sAzhlx1MDAxMPtcdTAwMWEv0Ht9XHUwMDBlfOmBiidy4OPH7e5xoENL6uFduYHx3MDz11x1MDAxZSvYximRze34hnxpMsy1eOHh7GedXHUwMDE4R8Y92b1cdTAwMDFRIdmVwVx1MDAwM71cImZcdTAwMTakSlx1MDAwMs9cYk1cdTAwMDSu5U+9+lflwI1cdTAwMDDg+z/L/jNJ8JFcdTAwMWSeR97i4irPv5OT+CtcdTAwMTJMlyr/4ndcdTAwMGbSw52vW9uTLT1zg1c+mfLq6Vx1MDAxMlx1MDAxZd6H5CDtXHUwMDEx71x1MDAxNNL339uuSeZgXCJ65OJcdTAwMTBcdTAwMGXcjelcdTAwMTLveTRFu4FyRPBcdTAwMWHG3ss3QslcdTAwMWZ/yzMnf8Z7XFxcdTAwMWWBycbzW47ruo5cdTAwMTTPeItLP1x1MDAxY1//cTCvXHUwMDFkbNdcdTAwMDbmYHE2zKvZ/MOjxC25XHUwMDFiUUJ2WUmvoWSjqfCeIMF741x1MDAxZLyH6k9cdTAwMDRcdHlcdTAwMTDqxa9k+bNB8pdCYX1uX1xcXHUwMDFjUnd6t4qeZrLfLUKYq8F8xlmPrcdcdTAwMTfzL1x1MDAwNfh4XHUwMDEwK0Pr6X/SZGVrfFx1MDAxZffHvcY6cv97dp3+dTlee1+C
XHUwMDA13PP/XlxuLWNcdTAwMWKHVv5uv/79TvX/XHUwMDA30U5cdTAwMTKr77uxOs22TOd4eZXkot/ePVx1MDAxMEller2vu7qbOTrKnOtknFxcR4PyXCLaXHUwMDBl8u446Ye73YvT3YPp98PQRJWtRW87TNs7R1x1MDAxN6fqXFysX+uOR6Ou2LvuVUQ/2i7Po0b5Kq7U0zhcdTAwMGaz/UEq4n5ZR4Pwqlapq3Bg/GTnq2hvb1xyv1x1MDAxZu7FUd40+4OhoWeW0bZZxOMqPW/o+WZcdTAwMWFXWldRXs/CSjmNKqg3uao1yiqs1K/iRrRsVIZqf5DkXFw2r+LZZatRx7UsPiyjnjyuhCn1icYk6JrJatvlPDo0XCLKz2f7jdZif0Dj6JezmPpcdTAwMTdcco7O0Sb1W1JcdTAwMWJUTyTo3oLaX9i66in1l9u291x1MDAxZminXHUwMDEyXsWDJvUxos/UdCpVVTs0S4wnXHUwMDFhVLm9jX2sRHS9Sf0rL+PM9jFcdTAwMWEkSyqzrFx1MDAxZIqrqNLKfl1W2LKV01nUaDn7g7KKMpFhTWiO8s3lh1R/NI9y+lx1MDAxY1SdRqUlaZ7ykPpcdTAwMWZhPVx1MDAxYadcdTAwMWLnZr9cdTAwMTFKnsdDUcxtN4rylPrdXHUwMDEyUUZj2TaydvzIulx1MDAwZXjO9H5cdTAwMDPrXHUwMDFhu2F/a9w+Xs5Ixlx1MDAwNlFcdTAwMWWqlmrKsO//9fvu1nl3J01PSc5cdTAwMWGNSHFblTStNajPeVm3MkGyVNY0flx1MDAxYU+V5IXqXHUwMDFihLRcdTAwMTZcdI0pUfuNqqA+YexL6lx1MDAwYj5cdTAwMTfhNq1NntLYQypcdTAwMWJqKp/vXHUwMDBm6rTemO8qrTPJ1SCco96Q1zZ19lx1MDAwZqn+XGbPJTTuqkPzgE9ccjmkeaFxUX9cdTAwMWGRXHQxt41cYjKrbT1RyuswgFxmQ95a9DulOlt8n/pKc1x1MDAxYt6sI36nNPdcdTAwMGXNzZw+XHRvUUoyrOI8mdOcauo/9bepovFijvWIt8tUtmpqO9Vljdc7Qr1cdTAwMGXhKo/yZIWDPtYoTWPgbbA32G9EVM9QU3mD+SF50VRGxVmZ8GKWhNt5rVx1MDAwMblv0diaNIdNJ96pZnGlSeNE30NB/VrGg6KNQ3ErXHUwMDBmmCNaXHUwMDE34oHWMoK8NaI5cIg1oL5Kkmu0aWrbXHUwMDAy/CBcYpPUTl2yLFRoXlx1MDAxYlUq28R6gFx1MDAxZkSt0qJ1XHUwMDFkklx1MDAxY1ZcdTAwMWS6R/1OqV60XHUwMDEzLWJuj57dRvsth+pU4Fx1MDAxZuqHY9ed6yAsXGZpvlKH5zhvkdxhLZpcdTAwMTnVR30oU1x1MDAxZlwi/Macy1qjPo9InmJcdTAwMWVDXdCnQt14hrBAz1x1MDAwMtNVQ9dzbitcdTAwMTM0P3WH5adBc0XrW6P5seuLuqvAhYytzNAn+lpcdTAwMDaHXHUwMDAxU0s7hnpGa0HfsfZpSu1TP1wi8Fx1MDAwN69cdTAwMDFkkWR/XHUwMDExXHUwMDEzXHUwMDFmkozTXHUwMDFjVGnOeVxcaFx1MDAxM2upIFx1MDAxZrVKXHUwMDE18rOkdUrpPv2OUNeCeFx1MDAwMNdcdTAwMDVkgp6z64Yx4ndGbeZcdTAwMTGuq7hcdTAwMDH5pblpQC6prcqQxkHymtdpzZtYXHUwMDFmkocq9X/o1Fx1MDAwZcs0xylkfok1t9chXHUwMDAzZepvXGJcdTAwMWRAn5FD44Is5PS8YpnI8HuY11j26nltZ1x1MDAwMYzSfNJcXGFeXHUwMDE1cdKgibaJiyCnXHSNTeTMJZUkrUFcdTAwMTdcdTAwMDDrgyHkXHUwMDA3fFx1MDAwNVx1MDAwZXJcbn4v+k7zRnxEcoi5
p3lFm2Vj5zp17FpEin6rQlx1MDAxZiiLJ1x1MDAwMXxl9nlqu2HHSmPIMSaSXHUwMDFixdy8jb5GkEP6pPnZhkxcdTAwMGZJXHUwMDE2MLeWh1xiu1x1MDAxOXNcdTAwMDC4ssGf4NlcZrJcdTAwMTejzKDKMlx1MDAwNf1GZUlcdTAwMGZcdTAwMDLTVegsWi/CKNZ7gDWgPlx1MDAwMGtccsxJ3ezfrDvphn3mXHUwMDEy5oxFXHJcXI52SVx1MDAxZahdktGyxVx1MDAxMDCYkzxAl9JcXFx1MDAxNXOTY31J9o19Nlx1MDAwNL/hWUnyxlx1MDAxY1Wr8Jou7NqgL1x1MDAxMThxyfJDXHUwMDE44e/ZXHK2WMcuao1cdTAwMTTfM8ggOFx1MDAwNHJb8EIhS+DEKl1cdTAwMGaVnWPSS8RcdTAwMDPcZlx1MDAwNnlcdTAwMDXvXHUwMDAyU1hcdTAwMWbiXHUwMDE1Xr/IyiCVYZzm3OfczpXFXHUwMDEyrVx1MDAxZvBOfMxYwvg1MFx1MDAxYvH6glx1MDAxM6GvmnPqr4SeJlx1MDAxZUZcdTAwMWZkbGXSiVkmMX6WyYWV3aGqXHUwMDFkL7g8eITuXHUwMDEzXHUwMDA3RsvY8iT4T1x1MDAwMF8x46KKPlq5PyQ52y5sh1x1MDAwNsvZMlJcdTAwMGJwXHUwMDBi4YvwxutxNKD6ZY2xUdeFfFLdeD5lLqByKXFxZnkoXHUwMDA1XjRkdp/WMc6YK5aYn1xia1x1MDAwMK5cdTAwMWaAn4Y38lx1MDAwNXuF5pzqXHUwMDA1j1dcbjw3aHwsXHUwMDA3dazF8oaHqV9cdTAwMGWvXHLsK8hqXmV9RXIyp3U1LD9cdTAwMTkwloBcdTAwMWJ13Fx1MDAxOKJvi1x1MDAxODJcdTAwMDd5Zb2XYG2onVx1MDAxNttcdTAwMTNUnmQnXHUwMDAxtjSvd5/1rMN9J11s5YN1aVx1MDAxNrHOSeY8Xq6vNbfrQd95rXh+citHUcayvY3vZaubraxlsE+srmY5Rt+IXHUwMDBiI3Ah1sOugeVGQ3qax1x1MDAxMvFaXHUwMDEzvzKWXHUwMDEzYWUhIVx1MDAxYlximE7BXHUwMDFkorDHMNdkf7Csgv9cdTAwMWTIL3iR7Fx1MDAxMWCcflx1MDAwZuk56je1XHUwMDA3XHUwMDFigvpcdTAwMGU7xOqCSlx1MDAxM/pcdTAwMGJcXMrleS7BySyDXHUwMDExZD1HPzqQz1x1MDAxYzxxSrZcdTAwMTD4ISrknWzWjPWAXHUwMDEzgb9Zb4bA86JWrFx1MDAxMbg8qnTPXHS/6Fx1MDAxYnBg6Fx1MDAxZdlH1m4reFWwXGY1WnlcdTAwMGLPZ2XWT5a3W2RjYbwkg7xewFx1MDAxOGSyVXBcIvQy5q6Zsk5nOVxyoctojetk96Is27M52/g8NshcIuxI0kHET6TPXHUwMDBiXsF60pi2WediTVx1MDAwNOODf1x1MDAxN3qMbJZcdTAwMWHw1uc1kHZcdTAwMWWAT3o2XHUwMDBmU+acXG5/ZswhtL7R7fqGzCkx94U4/1x1MDAxMNiGXHUwMDBlYLlyWC54PVJtZTXMwDNcdTAwMTGN3dqPlp/jSmJg39VcYiPWNsF6soxcdTAwMTOWbmRcdTAwMWNcdTAwMTiy60tzYyyGQrYzSM6XbGeA55nvI75OmJzzWNl2qcJm12wnXHUwMDFj4jOFLoJcdTAwMWMpyDHNtYS88FxcQK5g60F+XHTbtVxut2f56pDsfvBBXHUwMDFmfHVjoyRcdTAwMGXwZnVjU690XHUwMDEyc6ogvEjL563c8lxcQrpcdTAwMWFYXGItl0BcdTAwMDejXHUwMDFk64fReOtcdTAwMTKcXHUwMDA0XHUwMDE5rlWG5PPBvlx1MDAxZFJcdTAwMWbYTlx1MDAwMVx1MDAwNpn/qFxy
9FNZf1x1MDAwNHJC9lx1MDAxN/WNdFx1MDAwNGSH8I65SdPCLrf8ynxcdTAwMTjOWVx1MDAxNizussLWXHUwMDEz1latgq/yQnYwT4tcdTAwMWHLYDVcdTAwMDPuYqz7gG1cdTAwMTnoM3BcdPOeravu2GdcdTAwMTPqO9eVWTnl38A2+2wx610rW2xvXHKgK0NwJORcdTAwMWb8kPOaW1x1MDAwZVx1MDAwMZ40jzvDmPCb/Tdj9SF+t1x1MDAxNHM76/BcdTAwMTb0ubZ8XHUwMDAyPVx1MDAxZlr5trJsYvZcdTAwMGaSXHUwMDAyo3Z+bnSxtTvB1zyGgsdpXHUwMDBl2C6F3Vx0WyXJo5XOn1x1MDAxNzqf7VxcXHUwMDFhXHUwMDFi1jpje5ptQujahH0wtn2tLlx1MDAwNZ+SXHJK9j/sjFx1MDAxY7o6gVx1MDAwZVrynNLvyPq2Vlx1MDAwNlx1MDAwN4VtU0nQhmFdulxyXHUwMDE5gi3Eflx1MDAwN9tcdTAwMDdWXHUwMDFlXCJrV1x1MDAwZlingE9hL1x1MDAxMu+wXHIlXG5dXHKdZm2xnVx1MDAwNeTegS6ALVx1MDAxYjXOXHUwMDA3tnzI/iPJsrQ+XHUwMDE15LOOulxuv6BcZu5W0CWwJ2Lm/uFcdTAwMWP6kdZSWJ8wncdcdTAwMWNToL6QjVx1MDAxNfM8tljOrKw2l1x1MDAxNlx1MDAwYpEubD1tsTQkTLJcdTAwMWRcdTAwMGY5XHUwMDE0jFerf1m3RFx1MDAwM7b7cuCFrkmrQ5tz25cqZIt0Z1x1MDAxNX1cdTAwMTJcdTAwMDWng/dhR2uS31xm40S7XHUwMDFjt8DzbGuFrCfAV+B3i03YXsxjS+aHXGayRtzaqGe0fqpmuTCz9lNcdTAwMDK5z1x1MDAwYlx1MDAxZJaxL8e2XHUwMDA0ZFx1MDAxMmvQhD2MtcutXGaFwj6P51x1MDAwNPokMefWLiU/tI+1rFtfO1x1MDAxZmLeqF3Y41W2L1x1MDAwYvshh1x1MDAxY9XAMflwyf5W3mTbk9YkL/ytdM3fctg3ziC3KbdB85Syzd9gWVTWLi1cdTAwMTe8zPzMNiziXHUwMDA11GbO/vh22epo2C2Fz2htQeZTtqOsz96Stt9Dq0PYfk+19b/gR9z4vuDrlkN+RM4ySPJcdTAwMDVcdTAwMGUjO5PagmySTckxXGbGQWbjXHUwMDEztFx1MDAxZcytTbVf2GjAXHUwMDAzx3MyzHdds1x1MDAxZJPf+L5RVuhcdTAwMDdh/VHyp1x1MDAxOJOFX5ojjlFnf4bHXHLbgOaD11x1MDAwMnbIXHUwMDAw/lx1MDAxMM9cdTAwMDFwU3Ap/E3Gn0ScjPxcYsfW0XLsXHUwMDFhQN6xNqRcdTAwMDP7bNfQWMA7dWF1WKLYjlx1MDAxYbBcXJvCls8tVtmHhe21ZF+3kdz48Mb2gedC3PjfzO8nXHUwMDE3c8tcdTAwMGbAN61cdTAwMTHr1ch0XHUwMDE4Q8zt2q41/IeokOvugPmK9DhsXHUwMDBl+Fx1MDAxZrw+22XLb+zvhDd+Smaxy2tcZptqYe3eXHUwMDE2yyF0UZxcdTAwMTWyXHUwMDA2LGGNLY9cdTAwMTV+XHUwMDE0dNdcdTAwMTD+krD2x8GAfaxcdTAwMWNYhl1Xhj0tithazjyEXHUwMDE4Z99iXHUwMDAzMbS4XHUwMDExn7P/b20m+EiFL4qYV1x1MDAxNZi+aV/VKudup4H4XHUwMDFkx1x1MDAwNVx1MDAxNi20M1xiIb9oXHUwMDAzNrHiWFxybFx1MDAxOevj07qlurDDXHUwMDA0sFx1MDAxZFx1MDAxZlx1MDAxMz9cIo6GuF6hb61vyr5cdTAwMTbZdoxtYfmrXGaeXTK+XHUwMDA2dev7c1xcJeGYpcVnXCJbbEeki4JbXGb87VwivpNcdTAw
MTVxXHUwMDE32Hm6w23w3C5cIn1cdTAwMTFcdTAwMTO3ZjxusqVrXHUwMDFjXHUwMDA33ZtBd2KNyT5eXHUwMDE2PpQtg/VcdTAwMWRHzKFWLzfRXHUwMDFl/OybdcysjividrDhjqvcn1pcdTAwMTGfILujiCVWwVx1MDAxNTSfbDdyXGbU+s02nkPyfVx1MDAxM2ODTkZcXCq3Nr7IY/hcdTAwMWHQ95i7XHUwMDAx6SjogUaS22e5v47VXHUwMDFkMa1VKGvMxcBIPKDvhKu6jfVcdTAwMTHfU79cdTAwMDXHXHQwfubuqNDbiE8gdso+KuzQRWRjkTKucDyO9Fx1MDAwMa1xznaCZt+0gZhK3eEyWOO8ztxa4L7ww+FTRjx2+Edsi9hYXHUwMDE3cf9cdTAwMTD6RkLP1KzNwXZcIsrb9U1s7L1cdTAwMGZ/u2XXl+NcdTAwMTVDxjF8Q7u+0dLGXHUwMDFkWqmNO1x1MDAwMddNtrVcbjxcdTAwMGKOXHUwMDE5wma39edtcDvs//GC/WTm+Ia1qVknckw8LGyScHmDlWi76CfG2YA83sbYpOWT0MqfvqD5aFx1MDAxNnJcdTAwMDXMNlx1MDAxNc3DkueFeW/osP22XfjvXHUwMDFjK1x1MDAxOc2KWEFh/ybKcpiVcfbFOUaHeDXHMax+y2H7Qlx1MDAxZbBfwVxcvbB1MleBI0hPhFx1MDAwZfeBcFFwP2PPclx1MDAwYuqF/q+yruH9XG72x6GrkqX1IWBHtW701Fx1MDAxNXhcdTAwMTUxOdYxha1Vsz6RXHUwMDEzWV+cZOur28FaXHUwMDBmklwidnSDp3R561x1MDAwZrFNhlx1MDAxOCQ4XGK+PONKXHUwMDE3c73gsXHcXHUwMDFh/WNcdTAwMWQqOL7XYL2trc8+zGO1uGJba1x1MDAwMN2DsXJcdTAwMWPB2qE7XHUwMDEx206EXHUwMDAzY+OyTeg4UbM6T7CPS3ZkPChDd9G4XHUwMDEz61x1MDAwYqhcdTAwMDX0bFx1MDAxMedNmeuZh1x1MDAxYk3Vgj4jncrrw+uZLFhXXCKOzrqs4DiSv5jjXHUwMDFkZFx1MDAxZpP/UWvA58VYWsKOXHUwMDE1MUP2eYE1trN57nZcIpaZXCI2n1x1MDAxN/tKXHUwMDA17lx1MDAxMjtcdTAwMDc5/FX6PLZy2+FcdTAwMTgkx/xcdTAwMTRsPXBRbG1m1COZR1x1MDAxYa1cdTAwMWKehNxbX3y70GFccuwz1JWN1dehPyT70DZcdTAwMWUvXHUwMDExu6ixf1x1MDAxMc25ruNFXHUwMDFlsy9SZ05cdTAwMDLu2lx1MDAxY7ePeYzsXHUwMDFi33wydoFcdTAwMTXWXcpiZNpcdTAwMGZ3Y9E7Xo6+XHUwMDFm7onTk3Oxfzy6Ot05uupWpouaPlx1MDAxOPV26/PW8fLiVFx1MDAxOTfRXHUwMDA358mk7nV26JlDOT09XHUwMDFlTdq7dbczXHUwMDBlss7x19nN893dvfPOJFx1MDAxZXf03rw2dq4746bXXHUwMDFhL69bajZPdveuT/XeKNHxRYfq7O6EXHUwMDFllc3a6ii7eVx1MDAxNn1oa/JwTrZG+8en151JfZ7orVFLjcbt4/i8uzO67lxmLlx1MDAxYa1jJ8d+UEc5Yv+kO2pcdTAwMWZ3p92K6JONvqDrg45aXidcdTAwMDPRXHUwMDBmc+xhYX8jTE/Ho1mHnlx01emY/+2HKY0r66j5XGJ7SsXVMZXPaVx1MDAxY9dt1Zx31WjY3UmDkP3PMO3o01EyPp11dFx1MDAxMoST04tEjfqdnWY/3LF9PVx1MDAxZH+dt4+XXHUwMDBlzWnRR/+v3+2+6V/Xzq9cXPbu7LH7ruN6a294Rlx1MDAwNthBb37Z713//NR69sfT/1x1MDAxYeVL9pyf/6cu32PPOYvv7jfz73t7zYuQ
eY3+u90z3LtcIllcdTAwMTmtrW1cdTAwMTCOY5lM9kadyUGF5WVNXHUwMDFlSKZu21uXXHUwMDA3ln+Sw4TaScaB7IzrjCP6fkm4mLWPnVF7XHUwMDFjXFx0XHUwMDA2a/d34lnrJM6pXHUwMDBmXHUwMDE3LVx1MDAxNVxchTsjkk8z7+58Jdk5yuxvJ98/OTgnrIySvrzunlx1MDAxY3Bd95/dP6Yxboe3e54r+b1cdTAwMWSf3T+nOSA9K7rkp1x1MDAxZFa23J/n4f6+aZV4Pp5ZO/BoRuVcdTAwMWTS74a5j+xcdTAwMGK6znGoXHUwMDBl2VP7pCtvfj9cXE+V+DBN2Zc4XGavv1x1MDAwZpaL1snBNNypXHUwMDA34Vx1MDAxMHFcdTAwMDfSL+NcdTAwMDWt0bC/n9/b92fbbY905nr5xXWiTyff0//5n0cw5DueXnu7wVx1MDAwNlxm2afuYOjJXHUwMDE5KC/B0PPTW/6DoccxNOtcdTAwMWXHXHUwMDE3pzvNtLN7NGhcdTAwMWZcdTAwMWZQO/G0fXw0O92WXHUwMDAzXHUwMDFh06CdyVx1MDAwMmdLSXpjSHXm7Z3RqLND+kydky6Z9e/JZFx1MDAxNsFfVFx1MDAxMY25iThDXHUwMDExXHUwMDBm5FirQry31jg6h03O/lx1MDAwNPxcdTAwMGXEXHUwMDFhMti/Xyt2v1xiMV7o8zLv+yFuczQ4vVMmxH7kccQ+XHSwwz407ynyniHZXHUwMDE0e+e8XHUwMDE3wbGlql4rd8V7rlXeV5I2XHUwMDFlXHT7XHUwMDAzz7Dvl8LeXGIrVtfzJ7Xb69/D3ljAfsw4/1x1MDAwMLHyRp3xt69JlnLzXGK2XHUwMDAy4Vx1MDAwNZ73K2xcdTAwMTVP3dVPT/2TXHUwMDEzL9JPz/57XHUwMDE2/8HWc7F1XHUwMDFjX3dOtmSXdVx1MDAwZuuiXHUwMDA373H+1vho0N3e0nRPtDHOXHUwMDA26j6/tc1qNFx1MDAwZlx1MDAxOD/sqF6D7mM8XHUwMDEzsq1cdTAwMDah1Vx1MDAwM/fLXHUwMDBmylx1MDAwZpZHOS6P+nf3jJ2D+3pcYnHA82l0XHUwMDFmXHUwMDAzhf6p7aTqabZZoFx1MDAwMuOv/7HHh2XfPnVXrzz1lfMv0ivPfp/9f2T/2bLfnXXU3uhnubfXIfOkX7KWWpKdXHUwMDE2Q1x1MDAwZZeQzZZcdTAwMWWSTFfRt4yvbT8og4izIK7F+52cc2b5XHUwMDFl87OwsaKf72Ev8WdZZ5tpXG68PJHPtTHe+lGeh2XaPnWXz5/6bq9cdTAwMTfx+bNfXHUwMDFj9u8o04hp/SS3LfV1QXVmXHUwMDFkfXR1ur1Wjvzzzvjoj45cdTAwMWFdrd8nmTxPdERtWtltg6v7UrSO92anJ6H9PYnW/IqAeFx1MDAxY3U17z9LNtPyuntcXF/ZTDd9I1x1MDAwZSb//+qU+NzKIcnoTprFu60srqZcdTAwMGKyP2597uK5+ziwse1cZnxcdTAwMWNxXlx1MDAxZO9xWt+CY4ZcdTAwMWNzrpR1eLuPxvdErVx1MDAxMlx1MDAwZjinmPf4m0VuU9P6XGZcdTAwMTXszdc5d9SWq5tojD2SYVx1MDAxZfK+I/I2OG6EfVx1MDAwMth7mc3LTWTI+8Y0c4hcdTAwMTdcIm/N5r1cdTAwMTR7PE2F32Rv5TbPXHL5MV9cdTAwMDdcdTAwMTHn+KSp3a9ETFx1MDAxMbmxZd63s7ZcdTAwMWNyLOy+SnvbOLVj1D3EXHUwMDFlx4LbrbRs7ofNb1x1MDAxMkXsneo8XHUwMDFksK05XHUwMDE4XCKnY857bIjn8D3MXHKXlbaOVLdtLoCJ8/SJPOBcdTAwMWKP/KFf6bbiqTs88OQ3f72EXHUwMDA3nv9asX9HXHUwMDFleGu7bnp6cppx3OF4
OTpFnG3MMb35KfW7fbKVt0+iueWCrZvy7D91J3vMXHUwMDFkxFx1MDAxYuPutszYPtuW5F8tL5Ld+ILQ8mBMXHUwMDAw8cnogXhcdTAwMDDH2MfIta7ej1x1MDAwN2DvKcW+XHUwMDBm/DLkXHUwMDE43dd9XHUwMDFkvi9wP33ofqORsm7kvbmnxeKU4zrrR/1cdTAwMWbGhH3qXHUwMDBlJp78eoyXYOL57954O0ys5Fxuua+LcFx1MDAwN/vo5bRzglxcpVx1MDAxNee39FFGNv1Vt0oytFx1MDAxM0CeXHUwMDE1+em6Plx1MDAwZWArWZumUeb8sMhiZs13KN/Hm33m3jmRmGVrq0Lcjlx1MDAxYytZY1sp1DXss1xmWmnMuVx1MDAxZmHGOobzuHhcdTAwMGZlafMsTyPicvAzeFuRnERR3tz0O4/yvYjqgy7Io+FC8J451/fAb957PqhcdTAwMTAvI4dI2fK811x1MDAwYp9dcC5aZZja+qvI25Q13nPHPnVkXCLeXHUwMDE3O1wiLYezXHUwMDA3TVx1MDAwN/l2dlx1MDAxZlx1MDAxMflVLXm7f1DBflMrJ12iYs6bQ/nmMlx1MDAxYUbrv5Xd0yWsrOpTNj9cZvm2TXOTv0m6J+c9oGYkY97/qNN4Wpr3wFx1MDAwNmG6Nlx1MDAxZVVDvlx1MDAxYu991W3eXHUwMDAz4lx1MDAxMIOhzSu4M1x1MDAxZtWF3UNcdTAwMWXRfCBcdTAwMGY95XziXCJcdTAwMTdcdTAwMDR1cm63vVZP7ZriO81fPlx1MDAxNEWOXCLfK+RF1zhPIbn5LWmtKsX3XGY535zfdVu2XFzUudUmLlre9JX+s/v5w0jw+Fx1MDAxYlx1MDAxMfJRMfdcIrJ2SF7jPJVyflNHPafPPDK8d0S8XHUwMDE1I05UQW7gMKdy2NtFTkox7qHDuarNxdq1lkOysnz0N/pSOSDZrN7KYo1zPuqiUXmg/6OL2PYpZDmN8z2at/qSc3My/EZcdTAwMWVcdTAwMTTOmiSC9z1cdTAwMTH/yVx1MDAxM85cdTAwMTXFtZrNXHUwMDBiwDXBe0PDhcNncWx+sIHsYH7J/sl4XHUwMDFmkWSxhlxczVx1MDAwMfZcZoeS99aRf1FcdTAwMTlyLj/Jolx1MDAxM3M+gt2jtvvLpyiPXHUwMDFjJuTTSbLpIDske0PF51B4f5hzS5GX4fB+IMtcdM5twCZcdTAwMWFRecg679lhP1xme+o35Wm+qijP525oXHK4vM1FpPKcm4T+t3gvma9cdTAwMTV5XHUwMDBi9lrI+atcdTAwMTHyOivYc1x1MDAxZCq7v1x1MDAxYqJOh3NPqlx1MDAxMdVcdFlIOFeBz4tcdTAwMTBebHnOXHUwMDE5lLyfynLG5W9wyOVobmw5zpNJi3KcNyY5zyWHXHUwMDFkt95u1dq7RfmIzys1+excdTAwMDfJd7o2XHUwMDE25D4scVx1MDAwZcTOXHUwMDBm9vhQZ8r73/V8fX6QW5Ni39vOL++DNpe8PsSVtjznXHUwMDBlSs6LakTLonyxPpzzXHUwMDAwjuX1RVx1MDAwZVx1MDAxMpXPbE4kymN9q7b8oM7ySuVX8tGA3dtCLiaVRy5cdGKWTexj5vQsla+r4mxcYnGPXf/GmnxcdTAwMTI32fqrkcZ5u4j3ppFcdTAwMWKDPDDiLpz9IZnlPVictcqRK5EsOVx1MDAwZpu5LbL5aiiPc1xyXHIuj/nj/HNeXHUwMDA3sjX43Fx1MDAxOPNcdTAwMDfOcSTcn1x1MDAxYfshraxBWLf5XHUwMDAwTeZcdTAwMDPqXHUwMDFm8qe05TG263Pun61LYFx1MDAxZYinlthcdTAwMGbnvOtcdTAwMWM4seex1vtm9VdcdTAwMWRcXKQ5XHUwMDE3XCJH3yBcdTAwMWbwiVroy01eMuGZc7VQPq/Zc0vAbnY8XHUw
MDE0a5xbL/KfXHUwMDBlXCLbr1x1MDAxYp1W5ZyzOulcdTAwMWTOf+/TNc5nxnmJpr7VKyxfnJtN7d/RK2u/64pz71x1MDAxYjxcdTAwMTe53TOvQ0a15YPbdlx1MDAwNZ9ryevi+Od4nFx1MDAwNlx1MDAxZtGc5Cx7NHdYe8w18oitr1x1MDAxM1x1MDAxNZxbdnhO+WxPxOMvclx1MDAxYYs5XHJlnXjN5k9C93O+ouJcdTAwMWOsnPlK8rxw7iPOoUZOkSMsbI4un1x1MDAxZEU+sNVlNldcdTAwMGW6kDCXXCKXWlx1MDAxNnjTkMdcdTAwMWHLc9Wx+lx1MDAwNzkxsFxyhrnNQahcdTAwMTZcdTAwMTjmNXU4R7+CvLom2udzUcivqPGZiiZhXHUwMDAyOYVcXOfC5oqAj1x1MDAxMs5LXHUwMDAyXHUwMDA2bmW4wvnOJmpinpC/XHUwMDA02eJ5XCLOJozmdeZQ5MJHXHUwMDE1zvnFOUecybS/XHRTXHUwMDBmrIHD+a2Htk3MXHUwMDAz4TbjfDrO60GeXHUwMDA3ctPKjj2rwLiHPJNNXHUwMDAzLlxub3L/iz4zbiGbXHUwMDBl5y9cZqqq0LN2XHUwMDFkXHUwMDFhTeTGL1x1MDAxOdfATY68XHUwMDE09smVzWHCPPPa2ryiQci5MLUmyqcoj7xcdTAwMTj0XHUwMDBmNmFqdV1rybI8QL4mn+3kc4/IXHUwMDE5Kc5cdTAwMDTnnPeEfCq7rszNNqdcbnPYXFxy/kxcdTAwMDPcXGaeSJGPzpxeI44r1pX00LDQXHR1VYNcXFx1MDAwMG9cdTAwMTWcr+D5UOCBXHUwMDFh58DUc7uGLc4/oWua94d4LknmjqZPjdW5gedp71exOvvU7aH3f/7ln/9cdTAwMGbX67+/In0= + + + + + Your ClusterLapdev EnvironmentLapdev-Kube-ManagerSecure Websocket TunnelDevboxPreview URLLapdev EnvironmentLapdev EnvironmentAPI ServerLapdev \ No newline at end of file diff --git a/docs/index.mdx b/docs/index.mdx new file mode 100644 index 0000000..c1f5866 --- /dev/null +++ b/docs/index.mdx @@ -0,0 +1 @@ +sdfsdfdsf \ No newline at end of file diff --git a/lapdev-api/pages/main.css b/lapdev-api/pages/main.css deleted file mode 100644 index 85e5c9a..0000000 --- a/lapdev-api/pages/main.css +++ /dev/null @@ -1,1042 +0,0 @@ -/* -! tailwindcss v3.4.1 | MIT License | https://tailwindcss.com -*/ - -/* -1. Prevent padding and border from affecting element width. (https://github.com/mozdevs/cssremedy/issues/4) -2. Allow adding a border to an element by just adding a border-width. 
(https://github.com/tailwindcss/tailwindcss/pull/116) -*/ - -*, -::before, -::after { - box-sizing: border-box; - /* 1 */ - border-width: 0; - /* 2 */ - border-style: solid; - /* 2 */ - border-color: #E5E7EB; - /* 2 */ -} - -::before, -::after { - --tw-content: ''; -} - -/* -1. Use a consistent sensible line-height in all browsers. -2. Prevent adjustments of font size after orientation changes in iOS. -3. Use a more readable tab size. -4. Use the user's configured `sans` font-family by default. -5. Use the user's configured `sans` font-feature-settings by default. -6. Use the user's configured `sans` font-variation-settings by default. -7. Disable tap highlights on iOS -*/ - -html, -:host { - line-height: 1.5; - /* 1 */ - -webkit-text-size-adjust: 100%; - /* 2 */ - -moz-tab-size: 4; - /* 3 */ - -o-tab-size: 4; - tab-size: 4; - /* 3 */ - font-family: ui-sans-serif, system-ui, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol", "Noto Color Emoji"; - /* 4 */ - font-feature-settings: normal; - /* 5 */ - font-variation-settings: normal; - /* 6 */ - -webkit-tap-highlight-color: transparent; - /* 7 */ -} - -/* -1. Remove the margin in all browsers. -2. Inherit line-height from `html` so users can set them as a class directly on the `html` element. -*/ - -body { - margin: 0; - /* 1 */ - line-height: inherit; - /* 2 */ -} - -/* -1. Add the correct height in Firefox. -2. Correct the inheritance of border color in Firefox. (https://bugzilla.mozilla.org/show_bug.cgi?id=190655) -3. Ensure horizontal rules are visible by default. -*/ - -hr { - height: 0; - /* 1 */ - color: inherit; - /* 2 */ - border-top-width: 1px; - /* 3 */ -} - -/* -Add the correct text decoration in Chrome, Edge, and Safari. -*/ - -abbr:where([title]) { - -webkit-text-decoration: underline dotted; - text-decoration: underline dotted; -} - -/* -Remove the default font size and weight for headings. 
-*/ - -h1, -h2, -h3, -h4, -h5, -h6 { - font-size: inherit; - font-weight: inherit; -} - -/* -Reset links to optimize for opt-in styling instead of opt-out. -*/ - -a { - color: inherit; - text-decoration: inherit; -} - -/* -Add the correct font weight in Edge and Safari. -*/ - -b, -strong { - font-weight: bolder; -} - -/* -1. Use the user's configured `mono` font-family by default. -2. Use the user's configured `mono` font-feature-settings by default. -3. Use the user's configured `mono` font-variation-settings by default. -4. Correct the odd `em` font sizing in all browsers. -*/ - -code, -kbd, -samp, -pre { - font-family: ui-monospace, SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace; - /* 1 */ - font-feature-settings: normal; - /* 2 */ - font-variation-settings: normal; - /* 3 */ - font-size: 1em; - /* 4 */ -} - -/* -Add the correct font size in all browsers. -*/ - -small { - font-size: 80%; -} - -/* -Prevent `sub` and `sup` elements from affecting the line height in all browsers. -*/ - -sub, -sup { - font-size: 75%; - line-height: 0; - position: relative; - vertical-align: baseline; -} - -sub { - bottom: -0.25em; -} - -sup { - top: -0.5em; -} - -/* -1. Remove text indentation from table contents in Chrome and Safari. (https://bugs.chromium.org/p/chromium/issues/detail?id=999088, https://bugs.webkit.org/show_bug.cgi?id=201297) -2. Correct table border color inheritance in all Chrome and Safari. (https://bugs.chromium.org/p/chromium/issues/detail?id=935729, https://bugs.webkit.org/show_bug.cgi?id=195016) -3. Remove gaps between table borders by default. -*/ - -table { - text-indent: 0; - /* 1 */ - border-color: inherit; - /* 2 */ - border-collapse: collapse; - /* 3 */ -} - -/* -1. Change the font styles in all browsers. -2. Remove the margin in Firefox and Safari. -3. Remove default padding in all browsers. 
-*/ - -button, -input, -optgroup, -select, -textarea { - font-family: inherit; - /* 1 */ - font-feature-settings: inherit; - /* 1 */ - font-variation-settings: inherit; - /* 1 */ - font-size: 100%; - /* 1 */ - font-weight: inherit; - /* 1 */ - line-height: inherit; - /* 1 */ - color: inherit; - /* 1 */ - margin: 0; - /* 2 */ - padding: 0; - /* 3 */ -} - -/* -Remove the inheritance of text transform in Edge and Firefox. -*/ - -button, -select { - text-transform: none; -} - -/* -1. Correct the inability to style clickable types in iOS and Safari. -2. Remove default button styles. -*/ - -button, -[type='button'], -[type='reset'], -[type='submit'] { - -webkit-appearance: button; - /* 1 */ - background-color: transparent; - /* 2 */ - background-image: none; - /* 2 */ -} - -/* -Use the modern Firefox focus style for all focusable elements. -*/ - -:-moz-focusring { - outline: auto; -} - -/* -Remove the additional `:invalid` styles in Firefox. (https://github.com/mozilla/gecko-dev/blob/2f9eacd9d3d995c937b4251a5557d95d494c9be1/layout/style/res/forms.css#L728-L737) -*/ - -:-moz-ui-invalid { - box-shadow: none; -} - -/* -Add the correct vertical alignment in Chrome and Firefox. -*/ - -progress { - vertical-align: baseline; -} - -/* -Correct the cursor style of increment and decrement buttons in Safari. -*/ - -::-webkit-inner-spin-button, -::-webkit-outer-spin-button { - height: auto; -} - -/* -1. Correct the odd appearance in Chrome and Safari. -2. Correct the outline style in Safari. -*/ - -[type='search'] { - -webkit-appearance: textfield; - /* 1 */ - outline-offset: -2px; - /* 2 */ -} - -/* -Remove the inner padding in Chrome and Safari on macOS. -*/ - -::-webkit-search-decoration { - -webkit-appearance: none; -} - -/* -1. Correct the inability to style clickable types in iOS and Safari. -2. Change font properties to `inherit` in Safari. 
-*/ - -::-webkit-file-upload-button { - -webkit-appearance: button; - /* 1 */ - font: inherit; - /* 2 */ -} - -/* -Add the correct display in Chrome and Safari. -*/ - -summary { - display: list-item; -} - -/* -Removes the default spacing and border for appropriate elements. -*/ - -blockquote, -dl, -dd, -h1, -h2, -h3, -h4, -h5, -h6, -hr, -figure, -p, -pre { - margin: 0; -} - -fieldset { - margin: 0; - padding: 0; -} - -legend { - padding: 0; -} - -ol, -ul, -menu { - list-style: none; - margin: 0; - padding: 0; -} - -/* -Reset default styling for dialogs. -*/ - -dialog { - padding: 0; -} - -/* -Prevent resizing textareas horizontally by default. -*/ - -textarea { - resize: vertical; -} - -/* -1. Reset the default placeholder opacity in Firefox. (https://github.com/tailwindlabs/tailwindcss/issues/3300) -2. Set the default placeholder color to the user's configured gray 400 color. -*/ - -input::-moz-placeholder, textarea::-moz-placeholder { - opacity: 1; - /* 1 */ - color: #9CA3AF; - /* 2 */ -} - -input::placeholder, -textarea::placeholder { - opacity: 1; - /* 1 */ - color: #9CA3AF; - /* 2 */ -} - -/* -Set the default cursor for buttons. -*/ - -button, -[role="button"] { - cursor: pointer; -} - -/* -Make sure disabled buttons don't get the pointer cursor. -*/ - -:disabled { - cursor: default; -} - -/* -1. Make replaced elements `display: block` by default. (https://github.com/mozdevs/cssremedy/issues/14) -2. Add `vertical-align: middle` to align replaced elements more sensibly by default. (https://github.com/jensimmons/cssremedy/issues/14#issuecomment-634934210) - This can trigger a poorly considered lint error in some tools but is included by design. -*/ - -img, -svg, -video, -canvas, -audio, -iframe, -embed, -object { - display: block; - /* 1 */ - vertical-align: middle; - /* 2 */ -} - -/* -Constrain images and videos to the parent width and preserve their intrinsic aspect ratio. 
(https://github.com/mozdevs/cssremedy/issues/14) -*/ - -img, -video { - max-width: 100%; - height: auto; -} - -/* Make elements with the HTML hidden attribute stay hidden by default */ - -[hidden] { - display: none; -} - -[data-popper-arrow],[data-popper-arrow]:before { - position: absolute; - width: 8px; - height: 8px; - background: inherit; -} - -[data-popper-arrow] { - visibility: hidden; -} - -[data-popper-arrow]:before { - content: ""; - visibility: visible; - transform: rotate(45deg); -} - -[data-popper-arrow]:after { - content: ""; - visibility: visible; - transform: rotate(45deg); - position: absolute; - width: 9px; - height: 9px; - background: inherit; -} - -[role="tooltip"] > [data-popper-arrow]:before { - border-style: solid; - border-color: #e5e7eb; -} - -[role="tooltip"] > [data-popper-arrow]:after { - border-style: solid; - border-color: #e5e7eb; -} - -[data-popover][role="tooltip"][data-popper-placement^='top'] > [data-popper-arrow]:before { - border-bottom-width: 1px; - border-right-width: 1px; -} - -[data-popover][role="tooltip"][data-popper-placement^='top'] > [data-popper-arrow]:after { - border-bottom-width: 1px; - border-right-width: 1px; -} - -[data-popover][role="tooltip"][data-popper-placement^='right'] > [data-popper-arrow]:before { - border-bottom-width: 1px; - border-left-width: 1px; -} - -[data-popover][role="tooltip"][data-popper-placement^='right'] > [data-popper-arrow]:after { - border-bottom-width: 1px; - border-left-width: 1px; -} - -[data-popover][role="tooltip"][data-popper-placement^='bottom'] > [data-popper-arrow]:before { - border-top-width: 1px; - border-left-width: 1px; -} - -[data-popover][role="tooltip"][data-popper-placement^='bottom'] > [data-popper-arrow]:after { - border-top-width: 1px; - border-left-width: 1px; -} - -[data-popover][role="tooltip"][data-popper-placement^='left'] > [data-popper-arrow]:before { - border-top-width: 1px; - border-right-width: 1px; -} - 
-[data-popover][role="tooltip"][data-popper-placement^='left'] > [data-popper-arrow]:after { - border-top-width: 1px; - border-right-width: 1px; -} - -[data-popover][role="tooltip"][data-popper-placement^='top'] > [data-popper-arrow] { - bottom: -5px; -} - -[data-popover][role="tooltip"][data-popper-placement^='bottom'] > [data-popper-arrow] { - top: -5px; -} - -[data-popover][role="tooltip"][data-popper-placement^='left'] > [data-popper-arrow] { - right: -5px; -} - -[data-popover][role="tooltip"][data-popper-placement^='right'] > [data-popper-arrow] { - left: -5px; -} - -[type='text'],[type='email'],[type='url'],[type='password'],[type='number'],[type='date'],[type='datetime-local'],[type='month'],[type='search'],[type='tel'],[type='time'],[type='week'],[multiple],textarea,select { - -webkit-appearance: none; - -moz-appearance: none; - appearance: none; - background-color: #fff; - border-color: #6B7280; - border-width: 1px; - border-radius: 0px; - padding-top: 0.5rem; - padding-right: 0.75rem; - padding-bottom: 0.5rem; - padding-left: 0.75rem; - font-size: 1rem; - line-height: 1.5rem; - --tw-shadow: 0 0 #0000; -} - -[type='text']:focus, [type='email']:focus, [type='url']:focus, [type='password']:focus, [type='number']:focus, [type='date']:focus, [type='datetime-local']:focus, [type='month']:focus, [type='search']:focus, [type='tel']:focus, [type='time']:focus, [type='week']:focus, [multiple]:focus, textarea:focus, select:focus { - outline: 2px solid transparent; - outline-offset: 2px; - --tw-ring-inset: var(--tw-empty,/*!*/ /*!*/); - --tw-ring-offset-width: 0px; - --tw-ring-offset-color: #fff; - --tw-ring-color: #1C64F2; - --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color); - --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(1px + var(--tw-ring-offset-width)) var(--tw-ring-color); - box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow); - border-color: #1C64F2; -} - 
-input::-moz-placeholder, textarea::-moz-placeholder { - color: #6B7280; - opacity: 1; -} - -input::placeholder,textarea::placeholder { - color: #6B7280; - opacity: 1; -} - -::-webkit-datetime-edit-fields-wrapper { - padding: 0; -} - -::-webkit-date-and-time-value { - min-height: 1.5em; -} - -select:not([size]) { - background-image: url("data:image/svg+xml,%3csvg aria-hidden='true' xmlns='http://www.w3.org/2000/svg' fill='none' viewBox='0 0 10 6'%3e %3cpath stroke='%236B7280' stroke-linecap='round' stroke-linejoin='round' stroke-width='2' d='m1 1 4 4 4-4'/%3e %3c/svg%3e"); - background-position: right 0.75rem center; - background-repeat: no-repeat; - background-size: 0.75em 0.75em; - padding-right: 2.5rem; - -webkit-print-color-adjust: exact; - print-color-adjust: exact; -} - -:is([dir=rtl]) select:not([size]) { - background-position: left 0.75rem center; - padding-right: 0.75rem; - padding-left: 0; -} - -[multiple] { - background-image: initial; - background-position: initial; - background-repeat: unset; - background-size: initial; - padding-right: 0.75rem; - -webkit-print-color-adjust: unset; - print-color-adjust: unset; -} - -[type='checkbox'],[type='radio'] { - -webkit-appearance: none; - -moz-appearance: none; - appearance: none; - padding: 0; - -webkit-print-color-adjust: exact; - print-color-adjust: exact; - display: inline-block; - vertical-align: middle; - background-origin: border-box; - -webkit-user-select: none; - -moz-user-select: none; - user-select: none; - flex-shrink: 0; - height: 1rem; - width: 1rem; - color: #1C64F2; - background-color: #fff; - border-color: #6B7280; - border-width: 1px; - --tw-shadow: 0 0 #0000; -} - -[type='checkbox'] { - border-radius: 0px; -} - -[type='radio'] { - border-radius: 100%; -} - -[type='checkbox']:focus,[type='radio']:focus { - outline: 2px solid transparent; - outline-offset: 2px; - --tw-ring-inset: var(--tw-empty,/*!*/ /*!*/); - --tw-ring-offset-width: 2px; - --tw-ring-offset-color: #fff; - --tw-ring-color: 
#1C64F2; - --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color); - --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(2px + var(--tw-ring-offset-width)) var(--tw-ring-color); - box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow); -} - -[type='checkbox']:checked,[type='radio']:checked,.dark [type='checkbox']:checked,.dark [type='radio']:checked { - border-color: transparent; - background-color: currentColor; - background-size: 0.55em 0.55em; - background-position: center; - background-repeat: no-repeat; -} - -[type='checkbox']:checked { - background-image: url("data:image/svg+xml,%3csvg aria-hidden='true' xmlns='http://www.w3.org/2000/svg' fill='none' viewBox='0 0 16 12'%3e %3cpath stroke='white' stroke-linecap='round' stroke-linejoin='round' stroke-width='3' d='M1 5.917 5.724 10.5 15 1.5'/%3e %3c/svg%3e"); - background-repeat: no-repeat; - background-size: 0.55em 0.55em; - -webkit-print-color-adjust: exact; - print-color-adjust: exact; -} - -[type='radio']:checked { - background-image: url("data:image/svg+xml,%3csvg viewBox='0 0 16 16' fill='white' xmlns='http://www.w3.org/2000/svg'%3e%3ccircle cx='8' cy='8' r='3'/%3e%3c/svg%3e"); - background-size: 1em 1em; -} - -[type='checkbox']:indeterminate { - background-image: url("data:image/svg+xml,%3csvg aria-hidden='true' xmlns='http://www.w3.org/2000/svg' fill='none' viewBox='0 0 16 12'%3e %3cpath stroke='white' stroke-linecap='round' stroke-linejoin='round' stroke-width='3' d='M1 5.917 5.724 10.5 15 1.5'/%3e %3c/svg%3e"); - background-color: currentColor; - border-color: transparent; - background-position: center; - background-repeat: no-repeat; - background-size: 0.55em 0.55em; - -webkit-print-color-adjust: exact; - print-color-adjust: exact; -} - -[type='checkbox']:indeterminate:hover,[type='checkbox']:indeterminate:focus { - border-color: transparent; - background-color: currentColor; -} - -[type='file'] { - background: unset; - 
border-color: inherit; - border-width: 0; - border-radius: 0; - padding: 0; - font-size: unset; - line-height: inherit; -} - -[type='file']:focus { - outline: 1px auto inherit; -} - -input[type=file]::file-selector-button { - color: white; - background: #1F2937; - border: 0; - font-weight: 500; - font-size: 0.875rem; - cursor: pointer; - padding-top: 0.625rem; - padding-bottom: 0.625rem; - padding-left: 2rem; - padding-right: 1rem; - margin-inline-start: -1rem; - margin-inline-end: 1rem; -} - -input[type=file]::file-selector-button:hover { - background: #374151; -} - -:is([dir=rtl]) input[type=file]::file-selector-button { - padding-right: 2rem; - padding-left: 1rem; -} - -input[type="range"]::-webkit-slider-thumb { - height: 1.25rem; - width: 1.25rem; - background: #1C64F2; - border-radius: 9999px; - border: 0; - appearance: none; - -moz-appearance: none; - -webkit-appearance: none; - cursor: pointer; -} - -input[type="range"]:disabled::-webkit-slider-thumb { - background: #9CA3AF; -} - -input[type="range"]:focus::-webkit-slider-thumb { - outline: 2px solid transparent; - outline-offset: 2px; - --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color); - --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(4px + var(--tw-ring-offset-width)) var(--tw-ring-color); - box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000); - --tw-ring-opacity: 1px; - --tw-ring-color: rgb(164 202 254 / var(--tw-ring-opacity)); -} - -input[type="range"]::-moz-range-thumb { - height: 1.25rem; - width: 1.25rem; - background: #1C64F2; - border-radius: 9999px; - border: 0; - appearance: none; - -moz-appearance: none; - -webkit-appearance: none; - cursor: pointer; -} - -input[type="range"]:disabled::-moz-range-thumb { - background: #9CA3AF; -} - -input[type="range"]::-moz-range-progress { - background: #3F83F8; -} - -input[type="range"]::-ms-fill-lower { - background: #3F83F8; -} - -*, ::before, ::after { - 
--tw-border-spacing-x: 0; - --tw-border-spacing-y: 0; - --tw-translate-x: 0; - --tw-translate-y: 0; - --tw-rotate: 0; - --tw-skew-x: 0; - --tw-skew-y: 0; - --tw-scale-x: 1; - --tw-scale-y: 1; - --tw-pan-x: ; - --tw-pan-y: ; - --tw-pinch-zoom: ; - --tw-scroll-snap-strictness: proximity; - --tw-gradient-from-position: ; - --tw-gradient-via-position: ; - --tw-gradient-to-position: ; - --tw-ordinal: ; - --tw-slashed-zero: ; - --tw-numeric-figure: ; - --tw-numeric-spacing: ; - --tw-numeric-fraction: ; - --tw-ring-inset: ; - --tw-ring-offset-width: 0px; - --tw-ring-offset-color: #fff; - --tw-ring-color: rgb(63 131 248 / 0.5); - --tw-ring-offset-shadow: 0 0 #0000; - --tw-ring-shadow: 0 0 #0000; - --tw-shadow: 0 0 #0000; - --tw-shadow-colored: 0 0 #0000; - --tw-blur: ; - --tw-brightness: ; - --tw-contrast: ; - --tw-grayscale: ; - --tw-hue-rotate: ; - --tw-invert: ; - --tw-saturate: ; - --tw-sepia: ; - --tw-drop-shadow: ; - --tw-backdrop-blur: ; - --tw-backdrop-brightness: ; - --tw-backdrop-contrast: ; - --tw-backdrop-grayscale: ; - --tw-backdrop-hue-rotate: ; - --tw-backdrop-invert: ; - --tw-backdrop-opacity: ; - --tw-backdrop-saturate: ; - --tw-backdrop-sepia: ; -} - -::backdrop { - --tw-border-spacing-x: 0; - --tw-border-spacing-y: 0; - --tw-translate-x: 0; - --tw-translate-y: 0; - --tw-rotate: 0; - --tw-skew-x: 0; - --tw-skew-y: 0; - --tw-scale-x: 1; - --tw-scale-y: 1; - --tw-pan-x: ; - --tw-pan-y: ; - --tw-pinch-zoom: ; - --tw-scroll-snap-strictness: proximity; - --tw-gradient-from-position: ; - --tw-gradient-via-position: ; - --tw-gradient-to-position: ; - --tw-ordinal: ; - --tw-slashed-zero: ; - --tw-numeric-figure: ; - --tw-numeric-spacing: ; - --tw-numeric-fraction: ; - --tw-ring-inset: ; - --tw-ring-offset-width: 0px; - --tw-ring-offset-color: #fff; - --tw-ring-color: rgb(63 131 248 / 0.5); - --tw-ring-offset-shadow: 0 0 #0000; - --tw-ring-shadow: 0 0 #0000; - --tw-shadow: 0 0 #0000; - --tw-shadow-colored: 0 0 #0000; - --tw-blur: ; - --tw-brightness: ; - 
--tw-contrast: ; - --tw-grayscale: ; - --tw-hue-rotate: ; - --tw-invert: ; - --tw-saturate: ; - --tw-sepia: ; - --tw-drop-shadow: ; - --tw-backdrop-blur: ; - --tw-backdrop-brightness: ; - --tw-backdrop-contrast: ; - --tw-backdrop-grayscale: ; - --tw-backdrop-hue-rotate: ; - --tw-backdrop-invert: ; - --tw-backdrop-opacity: ; - --tw-backdrop-saturate: ; - --tw-backdrop-sepia: ; -} - -.mx-auto { - margin-left: auto; - margin-right: auto; -} - -.mb-6 { - margin-bottom: 1.5rem; -} - -.mr-4 { - margin-right: 1rem; -} - -.flex { - display: flex; -} - -.h-10 { - height: 2.5rem; -} - -.h-6 { - height: 1.5rem; -} - -.h-9 { - height: 2.25rem; -} - -.w-1\/2 { - width: 50%; -} - -.w-10 { - width: 2.5rem; -} - -.w-64 { - width: 16rem; -} - -.grid-cols-4 { - grid-template-columns: repeat(4, minmax(0, 1fr)); -} - -.grid-cols-7 { - grid-template-columns: repeat(7, minmax(0, 1fr)); -} - -.flex-col { - flex-direction: column; -} - -.items-center { - align-items: center; -} - -.justify-center { - justify-content: center; -} - -.rounded-e-lg { - border-start-end-radius: 0.5rem; - border-end-end-radius: 0.5rem; -} - -.rounded-l-lg { - border-top-left-radius: 0.5rem; - border-bottom-left-radius: 0.5rem; -} - -.rounded-r-lg { - border-top-right-radius: 0.5rem; - border-bottom-right-radius: 0.5rem; -} - -.rounded-s-lg { - border-start-start-radius: 0.5rem; - border-end-start-radius: 0.5rem; -} - -.bg-gray-200 { - --tw-bg-opacity: 1; - background-color: rgb(229 231 235 / var(--tw-bg-opacity)); -} - -.bg-gray-50 { - --tw-bg-opacity: 1; - background-color: rgb(249 250 251 / var(--tw-bg-opacity)); -} - -.px-6 { - padding-left: 1.5rem; - padding-right: 1.5rem; -} - -.py-8 { - padding-top: 2rem; - padding-bottom: 2rem; -} - -.text-2xl { - font-size: 1.5rem; - line-height: 2rem; -} - -.text-lg { - font-size: 1.125rem; - line-height: 1.75rem; -} - -.font-semibold { - font-weight: 600; -} - -.leading-6 { - line-height: 1.5rem; -} - -.leading-9 { - line-height: 2.25rem; -} - -.text-gray-900 { - 
--tw-text-opacity: 1; - color: rgb(17 24 39 / var(--tw-text-opacity)); -} - -.text-red-600 { - --tw-text-opacity: 1; - color: rgb(224 36 36 / var(--tw-text-opacity)); -} - -.shadow-lg { - --tw-shadow: 0 10px 15px -3px rgb(0 0 0 / 0.1), 0 4px 6px -4px rgb(0 0 0 / 0.1); - --tw-shadow-colored: 0 10px 15px -3px var(--tw-shadow-color), 0 4px 6px -4px var(--tw-shadow-color); - box-shadow: var(--tw-ring-offset-shadow, 0 0 #0000), var(--tw-ring-shadow, 0 0 #0000), var(--tw-shadow); -} - -:is(.dark .dark\:bg-gray-900) { - --tw-bg-opacity: 1; - background-color: rgb(17 24 39 / var(--tw-bg-opacity)); -} - -:is(.dark .dark\:text-white) { - --tw-text-opacity: 1; - color: rgb(255 255 255 / var(--tw-text-opacity)); -} - -@media (min-width: 768px) { - .md\:h-screen { - height: 100vh; - } -} - -@media (min-width: 1024px) { - .lg\:py-0 { - padding-top: 0px; - padding-bottom: 0px; - } -} \ No newline at end of file diff --git a/lapdev-api/pages/not_authorised.html b/lapdev-api/pages/not_authorised.html deleted file mode 100644 index fe03705..0000000 --- a/lapdev-api/pages/not_authorised.html +++ /dev/null @@ -1,22 +0,0 @@ - - - - Lapdev Dashboard - - - -
    -
    - - - - - - - Lapdev - -

    Not authorised

    -
    -
    - - diff --git a/lapdev-api/pages/not_forwarded.html b/lapdev-api/pages/not_forwarded.html deleted file mode 100644 index 9e94084..0000000 --- a/lapdev-api/pages/not_forwarded.html +++ /dev/null @@ -1,22 +0,0 @@ - - - - Lapdev Dashboard - - - -
    -
    - - - - - - - Lapdev - -

    Workspace port not forwarded

    -
    -
    - - diff --git a/lapdev-api/pages/not_found.html b/lapdev-api/pages/not_found.html deleted file mode 100644 index cc58110..0000000 --- a/lapdev-api/pages/not_found.html +++ /dev/null @@ -1,22 +0,0 @@ - - - - Lapdev Dashboard - - - -
    -
    - - - - - - - Lapdev - -

    Workspace not found

    -
    -
    - - diff --git a/lapdev-api/pages/not_running.html b/lapdev-api/pages/not_running.html deleted file mode 100644 index 5757809..0000000 --- a/lapdev-api/pages/not_running.html +++ /dev/null @@ -1,22 +0,0 @@ - - - - Lapdev Dashboard - - - -
    -
    - - - - - - - Lapdev - -

    Workspace not running

    -
    -
    - - diff --git a/lapdev-api/src/router.rs b/lapdev-api/src/router.rs deleted file mode 100644 index 0203b9a..0000000 --- a/lapdev-api/src/router.rs +++ /dev/null @@ -1,470 +0,0 @@ -use axum::{ - body::Body, - extract::{Host, State, WebSocketUpgrade}, - http::Request, - middleware::{self, Next}, - response::{IntoResponse, Response}, - routing::{any, delete, get, post, put}, - Router, -}; -use axum_client_ip::SecureClientIpSource; -use axum_extra::{headers, TypedHeader}; -use hyper::StatusCode; -use lapdev_common::WorkspaceStatus; -use lapdev_proxy_http::{forward::ProxyForward, proxy::WorkspaceForwardError}; -use lapdev_rpc::error::ApiError; - -use crate::{ - account, admin, machine_type, organization, project, - session::{logout, new_session, session_authorize}, - state::CoreState, - websocket::handle_websocket, - workspace, -}; - -static STATIC_DIR: include_dir::Dir = - include_dir::include_dir!("$CARGO_MANIFEST_DIR/../lapdev-dashboard/dist"); -const PAGE_NOT_FOUND: &[u8] = include_bytes!("../pages/not_found.html"); -const PAGE_NOT_AUTHORISED: &[u8] = include_bytes!("../pages/not_authorised.html"); -const PAGE_NOT_RUNNING: &[u8] = include_bytes!("../pages/not_running.html"); -const PAGE_NOT_FORWARDED: &[u8] = include_bytes!("../pages/not_forwarded.html"); -const PAGE_CSS: &[u8] = include_bytes!("../pages/main.css"); - -fn private_routes() -> Router { - Router::new() - .route("/session", get(new_session)) - .route("/session", delete(logout)) - .route("/session/authorize", get(session_authorize)) - .route("/me", get(account::me)) - .route( - "/me/organization/:org_id", - put(account::set_current_organization), - ) -} - -async fn not_found() -> impl IntoResponse { - StatusCode::NOT_FOUND -} - -fn v1_api_routes(additional_router: Option>) -> Router { - let router = Router::new() - .route("/", any(not_found)) - .route("/*0", any(not_found)) - .route("/cluster_info", get(admin::get_cluster_info)) - .route("/auth_providers", get(admin::get_auth_providers)) - 
.route("/hostnames", get(admin::get_hostnames)) - .route("/machine_types", get(machine_type::all_machine_types)) - .route("/join/:invitation_id", put(organization::join_organization)) - .route("/organizations", post(organization::create_organization)) - .route( - "/organizations/:org_id", - delete(organization::delete_organization), - ) - .route( - "/organizations/:org_id/name", - put(organization::update_org_name), - ) - .route( - "/organizations/:org_id/auto_start_stop", - put(organization::update_org_auto_start_stop), - ) - .route( - "/organizations/:org_id/members", - get(organization::get_organization_members), - ) - .route( - "/organizations/:org_id/members/:user_id", - put(organization::update_organization_member), - ) - .route( - "/organizations/:org_id/members/:user_id", - delete(organization::delete_organization_member), - ) - .route( - "/organizations/:org_id/invitations", - post(organization::create_user_invitation), - ) - .route( - "/organizations/:org_id/usage", - get(organization::get_organization_usage), - ) - .route( - "/organizations/:org_id/audit_logs", - get(organization::get_organization_audit_log), - ) - .route( - "/organizations/:org_id/quota", - get(organization::get_organization_quota), - ) - .route( - "/organizations/:org_id/quota", - put(organization::update_organization_quota), - ) - .route( - "/organizations/:org_id/projects", - post(project::create_project), - ) - .route( - "/organizations/:org_id/projects", - get(project::all_projects), - ) - .route( - "/organizations/:org_id/projects/:project_id", - get(project::get_project), - ) - .route( - "/organizations/:org_id/projects/:project_id", - delete(project::delete_project), - ) - .route( - "/organizations/:org_id/projects/:project_id/branches", - get(project::get_project_branches), - ) - .route( - "/organizations/:org_id/projects/:project_id/prebuilds", - get(project::get_project_prebuilds), - ) - .route( - "/organizations/:org_id/projects/:project_id/prebuilds", - 
post(project::create_project_prebuild), - ) - .route( - "/organizations/:org_id/projects/:project_id/prebuilds/:prebuild_id", - delete(project::delete_project_prebuild), - ) - .route( - "/organizations/:org_id/projects/:project_id/machine_type/:machine_type_id", - put(project::update_project_machine_type), - ) - .route( - "/organizations/:org_id/projects/:project_id/env", - get(project::get_project_env), - ) - .route( - "/organizations/:org_id/projects/:project_id/env", - put(project::update_project_env), - ) - .route( - "/organizations/:org_id/workspaces", - post(workspace::create_workspace), - ) - .route( - "/organizations/:org_id/workspaces", - get(workspace::all_workspaces), - ) - .route( - "/organizations/:org_id/workspaces/:workspace_name", - get(workspace::get_workspace), - ) - .route( - "/organizations/:org_id/workspaces/:workspace_name", - delete(workspace::delete_workspace), - ) - .route( - "/organizations/:org_id/workspaces/:workspace_name/start", - post(workspace::start_workspace), - ) - .route( - "/organizations/:org_id/workspaces/:workspace_name/stop", - post(workspace::stop_workspace), - ) - .route( - "/organizations/:org_id/workspaces/:workspace_name/pin", - post(workspace::pin_workspace), - ) - .route( - "/organizations/:org_id/workspaces/:workspace_name/unpin", - post(workspace::unpin_workspace), - ) - .route( - "/organizations/:org_id/workspaces/:workspace_name/rebuild", - post(workspace::rebuild_workspace), - ) - .route( - "/organizations/:org_id/workspaces/:workspace_name/ports", - get(workspace::workspace_ports), - ) - .route( - "/organizations/:org_id/workspaces/:workspace_name/ports/:port", - put(workspace::update_workspace_port), - ) - .route("/account/ssh_keys", post(account::create_ssh_key)) - .route("/account/ssh_keys", get(account::all_ssh_keys)) - .route("/account/ssh_keys/:key_id", delete(account::delete_ssh_key)) - .route("/account/git_providers", get(account::get_git_providers)) - .route( - "/account/git_providers/connect", - 
put(account::connect_git_provider), - ) - .route( - "/account/git_providers/disconnect", - put(account::disconnect_git_provider), - ) - .route( - "/account/git_providers/update_scope", - put(account::update_scope), - ) - .route("/admin/workspace_hosts", get(admin::get_workspace_hosts)) - .route("/admin/workspace_hosts", post(admin::create_workspace_host)) - .route( - "/admin/workspace_hosts/:workspace_host_id", - put(admin::update_workspace_host), - ) - .route( - "/admin/workspace_hosts/:workspace_host_id", - delete(admin::delete_workspace_host), - ) - .route("/admin/license", get(admin::get_license)) - .route("/admin/license", put(admin::update_license)) - .route("/admin/new_license", post(admin::new_license)) - .route("/admin/oauth", get(admin::get_oauth)) - .route("/admin/oauth", put(admin::update_oauth)) - .route("/admin/certs", get(admin::get_certs)) - .route("/admin/certs", put(admin::update_certs)) - .route("/admin/hostnames", put(admin::update_hostnames)) - .route("/admin/cpu_overcommit", get(admin::get_cpu_overcommit)) - .route( - "/admin/cpu_overcommit/:value", - put(admin::update_cpu_overcommit), - ) - .route( - "/admin/machine_types", - post(machine_type::create_machine_type), - ) - .route( - "/admin/machine_types/:machine_type_id", - put(machine_type::update_machine_type), - ) - .route( - "/admin/machine_types/:machine_type_id", - delete(machine_type::delete_machine_type), - ) - .route("/admin/users", get(admin::get_cluster_users)) - .route("/admin/users/:user_id", put(admin::update_cluster_user)) - .route( - "/admin/organizations/:org_id/clear", - put(organization::clear_organization), - ); - if let Some(additional) = additional_router { - router.merge(additional) - } else { - router - } -} - -fn main_routes(additional_router: Option>) -> Router { - Router::new() - .nest("/v1", v1_api_routes(additional_router)) - .nest("/private", private_routes()) -} - -pub async fn build_router( - state: CoreState, - additional_router: Option>, -) -> Router<()> { - 
Router::new() - .route("/", any(handle_catch_all)) - .route("/*0", any(handle_catch_all)) - .route("/health-check", get(health_check)) - .nest("/api", main_routes(additional_router)) - .route_layer(middleware::from_fn_with_state( - state.clone(), - forward_middleware, - )) - .with_state(state) - .layer(SecureClientIpSource::ConnectInfo.into_extension()) -} - -async fn health_check() {} - -async fn handle_catch_all( - websocket: Option, - State(state): State, - TypedHeader(cookie): TypedHeader, - req: Request, -) -> Result { - let path = req.uri().path(); - - if let Some(websocket) = websocket { - let uri = req.uri(); - let path = uri.path(); - let query = uri.query(); - let resp = handle_websocket(path, query, websocket, cookie, state).await?; - return Ok(resp); - } - - if let Some(f) = path.strip_prefix("/static/") { - let accept_gzip = req - .headers() - .get("Accept-Encoding") - .and_then(|h| h.to_str().ok()) - .map(|s| s.contains("gzip")) - .unwrap_or(false); - if let Some(file) = if let Some(d) = state.static_dir.as_ref() { - d.get_file(f) - } else { - STATIC_DIR.get_file(f) - } { - let content_type = if f.ends_with(".css") { - Some("text/css") - } else if f.ends_with(".js") { - Some("text/javascript") - } else if f.ends_with(".wasm") { - Some("application/wasm") - } else if f.ends_with(".ico") { - Some("image/vnd.microsoft.icon") - } else { - None - }; - if let Some(content_type) = content_type { - if accept_gzip { - if let Some(file) = if let Some(d) = state.static_dir.as_ref() { - d.get_file(format!("{f}.gz")) - } else { - STATIC_DIR.get_file(format!("{f}.gz")) - } { - return Ok(( - [ - (axum::http::header::CONTENT_TYPE, content_type), - ( - axum::http::header::CACHE_CONTROL, - "public, max-age=31536000", - ), - (axum::http::header::CONTENT_ENCODING, "gzip"), - ], - file.contents(), - ) - .into_response()); - } - } - return Ok(( - [ - (axum::http::header::CONTENT_TYPE, content_type), - ( - axum::http::header::CACHE_CONTROL, - "public, max-age=31536000", - 
), - ], - file.contents(), - ) - .into_response()); - } - } - } - - if let Some(file) = if let Some(d) = state.static_dir.as_ref() { - d.get_file("index.html") - } else { - STATIC_DIR.get_file("index.html") - } { - return Ok(axum::response::Html::from(file.contents()).into_response()); - } - - Err(ApiError::InvalidRequest("Invalid Request".to_string())) -} - -async fn forward_middleware( - Host(hostname): Host, - websocket: Option, - State(state): State, - TypedHeader(cookie): TypedHeader, - req: axum::extract::Request, - next: Next, -) -> Result { - let path = req.uri().path(); - let path_query = req - .uri() - .path_and_query() - .map(|v| v.as_str()) - .unwrap_or(path); - let hostname = hostname - .split(":") - .next() - .ok_or_else(|| ApiError::InternalError("can't split : from hostname".to_string()))?; - let is_forward = state - .conductor - .hostnames - .read() - .await - .values() - .any(|v| v != hostname && hostname.ends_with(v)); - if !is_forward { - if let Some(websocket) = websocket { - let uri = req.uri(); - let path = uri.path(); - let query = uri.query(); - let resp = handle_websocket(path, query, websocket, cookie, state).await?; - return Ok(resp); - } - - return Ok(next.run(req).await); - } - - if path.starts_with("/error-page/lapdev-main.css") { - return Ok(( - [ - (axum::http::header::CONTENT_TYPE, "text/css"), - ( - axum::http::header::CACHE_CONTROL, - "public, max-age=31536000", - ), - ], - PAGE_CSS, - ) - .into_response()); - } - - let user = state.authenticate(&cookie).await.ok(); - match lapdev_proxy_http::proxy::forward_workspace(hostname, &state.db, user.as_ref()).await { - Ok((ws, port)) => { - if ws.status != WorkspaceStatus::Running.to_string() { - return Ok(axum::response::Html::from(PAGE_NOT_RUNNING).into_response()); - } - - let Some(workspace_host) = state.db.get_workspace_host(ws.host_id).await.ok().flatten() - else { - return Ok(axum::response::Html::from(PAGE_NOT_RUNNING).into_response()); - }; - - let port = port - .map(|p| 
p.host_port as u16) - .or_else(|| ws.ide_port.map(|p| p as u16)); - if let Some(forward) = lapdev_proxy_http::forward::handler( - &workspace_host.host, - path_query, - websocket, - req.headers(), - port, - ) - .await - { - match forward { - ProxyForward::Resp(resp) => Ok(resp), - ProxyForward::Proxy(uri) => { - // *req.uri_mut() = uri; - let headers = req.headers().clone(); - let mut new_req = Request::builder() - .method(req.method()) - .uri(uri) - .body(req.into_body()) - .unwrap(); - *new_req.headers_mut() = headers; - let resp = state.hyper_client.request(new_req).await?; - Ok(resp.into_response()) - } - } - } else { - Ok(axum::response::Html::from(PAGE_NOT_FOUND).into_response()) - } - } - Err(e) => { - let b = match e { - WorkspaceForwardError::WorkspaceNotFound => PAGE_NOT_FOUND, - WorkspaceForwardError::PortNotForwarded => PAGE_NOT_FORWARDED, - WorkspaceForwardError::InvalidHostname => PAGE_NOT_RUNNING, - WorkspaceForwardError::Unauthorised => PAGE_NOT_AUTHORISED, - }; - Ok(axum::response::Html::from(b).into_response()) - } - } -} diff --git a/lapdev-api/src/server.rs b/lapdev-api/src/server.rs deleted file mode 100644 index d94349b..0000000 --- a/lapdev-api/src/server.rs +++ /dev/null @@ -1,226 +0,0 @@ -use std::{ - path::{Path, PathBuf}, - sync::Arc, -}; - -use anyhow::{anyhow, Context, Result}; -use axum::{extract::Request, Router}; -use clap::Parser; -use futures_util::pin_mut; -use hyper::body::Incoming; -use hyper_util::rt::{TokioExecutor, TokioIo}; -use lapdev_conductor::Conductor; -use lapdev_db::api::DbApi; -use serde::Deserialize; -use tokio::net::TcpListener; -use tokio_rustls::TlsAcceptor; -use tower::Service; -use tracing::error; - -use crate::{cert::tls_config, router, state::CoreState}; - -pub const LAPDEV_VERSION: &str = env!("CARGO_PKG_VERSION"); - -#[derive(Clone, Deserialize, Default)] -#[serde(rename_all = "kebab-case")] -struct LapdevConfig { - db: Option, - bind: Option, - http_port: Option, - https_port: Option, - ssh_proxy_port: 
Option, - ssh_proxy_display_port: Option, - force_osuser: Option, -} - -#[derive(Parser)] -#[clap(name = "lapdev")] -#[clap(version = env!("CARGO_PKG_VERSION"))] -struct Cli { - /// The config file path - #[clap(short, long, action, value_hint = clap::ValueHint::AnyPath)] - config_file: Option, - /// The folder for putting logs - #[clap(short, long, action, value_hint = clap::ValueHint::AnyPath)] - logs_folder: Option, - /// The folder for putting data - #[clap(short, long, action, value_hint = clap::ValueHint::AnyPath)] - data_folder: Option, - /// Don't run db migration on startup - #[clap(short, long, action)] - no_migration: bool, -} - -pub async fn start( - additional_router: Option>, - static_dir: Option>, -) { - let cli = Cli::parse(); - let data_folder = cli - .data_folder - .clone() - .unwrap_or_else(|| PathBuf::from("/var/lib/lapdev")); - - let _result = setup_log(&cli, &data_folder).await; - - if let Err(e) = run(&cli, additional_router, data_folder, static_dir).await { - tracing::error!("lapdev api start server error: {e:#}"); - } -} - -async fn run( - cli: &Cli, - additional_router: Option>, - data_folder: PathBuf, - static_dir: Option>, -) -> Result<()> { - let config_file = cli - .config_file - .clone() - .unwrap_or_else(|| PathBuf::from("/etc/lapdev.conf")); - let config_content = tokio::fs::read_to_string(&config_file) - .await - .with_context(|| format!("can't read config file {}", config_file.to_string_lossy()))?; - let config: LapdevConfig = - toml::from_str(&config_content).with_context(|| "wrong config file format")?; - let db_url = config - .db - .ok_or_else(|| anyhow!("can't find database url in your config file"))?; - - let db = DbApi::new(&db_url, cli.no_migration).await?; - let conductor = - Conductor::new(LAPDEV_VERSION, db.clone(), data_folder, config.force_osuser).await?; - - let ssh_proxy_port = config.ssh_proxy_port.unwrap_or(2222); - let ssh_proxy_display_port = config.ssh_proxy_display_port.unwrap_or(2222); - { - let conductor = 
conductor.clone(); - let bind = config.bind.clone(); - tokio::spawn(async move { - if let Err(e) = lapdev_proxy_ssh::server::run( - conductor, - bind.as_deref().unwrap_or("0.0.0.0"), - ssh_proxy_port, - ) - .await - { - error!("ssh proxy error: {e:?}"); - } - }); - } - - { - tokio::spawn(async move { - if let Err(e) = lapdev_kube::run().await { - error!("kube run error {e:?}"); - } - }); - } - - let state = CoreState::new( - conductor, - ssh_proxy_port, - ssh_proxy_display_port, - static_dir, - ) - .await; - let app = router::build_router(state.clone(), additional_router).await; - let certs = state.certs.clone(); - - { - // start http server - let bind = format!( - "{}:{}", - config.bind.clone().unwrap_or_else(|| "0.0.0.0".to_string()), - config.http_port.unwrap_or(80) - ); - let tcp_listener = TcpListener::bind(&bind) - .await - .with_context(|| format!("bind to {bind}"))?; - let app = app.clone(); - tokio::spawn(async move { - if let Err(err) = axum::serve(tcp_listener, app.into_make_service()).await { - tracing::error!("http server stopped error: {err}"); - } - }); - } - - let bind = format!( - "{}:{}", - config.bind.unwrap_or_else(|| "0.0.0.0".to_string()), - config.https_port.unwrap_or(443) - ); - let tcp_listener = TcpListener::bind(&bind) - .await - .with_context(|| format!("bind to {bind}"))?; - let tls_acceptor = TlsAcceptor::from(Arc::new(tls_config(certs)?)); - - pin_mut!(tcp_listener); - loop { - let tower_service = app.clone(); - let tls_acceptor = tls_acceptor.clone(); - - // Wait for new tcp connection - let (cnx, addr) = tcp_listener.accept().await?; - - tokio::spawn(async move { - // Wait for tls handshake to happen - let stream = match tls_acceptor.accept(cnx).await { - Err(_) => { - return; - } - Ok(stream) => stream, - }; - - // Hyper has its own `AsyncRead` and `AsyncWrite` traits and doesn't use tokio. - // `TokioIo` converts between them. - let stream = TokioIo::new(stream); - - // Hyper also has its own `Service` trait and doesn't use tower. 
We can use - // `hyper::service::service_fn` to create a hyper `Service` that calls our app through - // `tower::Service::call`. - let hyper_service = hyper::service::service_fn(move |request: Request| { - // We have to clone `tower_service` because hyper's `Service` uses `&self` whereas - // tower's `Service` requires `&mut self`. - // - // We don't need to call `poll_ready` since `Router` is always ready. - tower_service.clone().call(request) - }); - - let ret = hyper_util::server::conn::auto::Builder::new(TokioExecutor::new()) - .serve_connection_with_upgrades(stream, hyper_service) - .await; - - if let Err(err) = ret { - tracing::warn!("error serving connection from {}: {}", addr, err); - } - }); - } -} - -async fn setup_log( - cli: &Cli, - data_folder: &Path, -) -> Result { - let folder = cli - .logs_folder - .clone() - .unwrap_or_else(|| data_folder.join("logs")); - tokio::fs::create_dir_all(&folder).await?; - let file_appender = tracing_appender::rolling::Builder::new() - .max_log_files(30) - .rotation(tracing_appender::rolling::Rotation::DAILY) - .filename_prefix("lapdev.log") - .build(folder)?; - let (non_blocking, guard) = tracing_appender::non_blocking(file_appender); - let var = std::env::var("RUST_LOG").unwrap_or_default(); - let var = - format!("error,lapdev=info,lapdev_api=info,lapdev_conductor=info,lapdev_rpc=info,lapdev_common=info,lapdev_db=info,lapdev_enterprise=info,lapdev_proxy_ssh=info,lapdev_proxy_http=info,{var}"); - let filter = tracing_subscriber::EnvFilter::builder().parse_lossy(var); - tracing_subscriber::fmt() - .with_ansi(false) - .with_env_filter(filter) - .with_writer(non_blocking) - .init(); - Ok(guard) -} diff --git a/lapdev-api/src/state.rs b/lapdev-api/src/state.rs deleted file mode 100644 index 8a5f6b7..0000000 --- a/lapdev-api/src/state.rs +++ /dev/null @@ -1,267 +0,0 @@ -use std::{collections::HashMap, sync::Arc}; - -use anyhow::{anyhow, Context, Result}; -use axum::{ - async_trait, body::Body, extract::FromRequestParts, 
http::request::Parts, RequestPartsExt, -}; -use axum_client_ip::InsecureClientIp; -use axum_extra::{ - headers::{self, UserAgent}, - TypedHeader, -}; -use hyper_util::{client::legacy::connect::HttpConnector, rt::TokioExecutor}; -use lapdev_common::LAPDEV_BASE_HOSTNAME; -use lapdev_conductor::{scheduler::LAPDEV_CPU_OVERCOMMIT, Conductor}; -use lapdev_db::{api::DbApi, entities}; -use lapdev_enterprise::license::LAPDEV_ENTERPRISE_LICENSE; -use lapdev_rpc::error::ApiError; -use pasetors::{ - claims::ClaimsValidationRules, - keys::SymmetricKey, - token::{TrustedToken, UntrustedToken}, - version4::V4, -}; -use sea_orm::{ColumnTrait, EntityTrait, QueryFilter}; -use serde::Deserialize; -use sqlx::postgres::PgNotification; -use tokio_rustls::rustls::sign::CertifiedKey; -use uuid::Uuid; - -use crate::{ - auth::{Auth, AuthConfig}, - cert::{load_cert, CertStore}, - github::GithubClient, - session::OAUTH_STATE_COOKIE, -}; - -pub const TOKEN_COOKIE_NAME: &str = "token"; -pub const LAPDEV_CERTS: &str = "lapdev-certs"; - -pub type HyperClient = hyper_util::client::legacy::Client; - -pub struct RequestInfo { - pub ip: Option, - pub user_agent: Option, -} - -#[derive(Debug, Deserialize)] -struct ConfigUpdatePayload { - name: String, - value: String, -} - -#[async_trait] -impl FromRequestParts for RequestInfo { - type Rejection = ApiError; - - async fn from_request_parts(parts: &mut Parts, _state: &S) -> Result { - let ip = InsecureClientIp::from(&parts.headers, &parts.extensions) - .ok() - .map(|ip| ip.0.to_string()); - let user_agent = parts.extract::>().await.ok(); - let user_agent = user_agent.map(|u| u.to_string()); - Ok(Self { user_agent, ip }) - } -} - -#[derive(Clone)] -pub struct CoreState { - pub conductor: Conductor, - pub github_client: GithubClient, - pub hyper_client: Arc, - pub db: DbApi, - pub auth: Arc, - pub auth_token_key: Arc>, - pub certs: CertStore, - // actuall ssh proxy port - pub ssh_proxy_port: u16, - // ssh proxy port to display in front end - pub 
ssh_proxy_display_port: u16, - pub static_dir: Arc>>, -} - -impl CoreState { - pub async fn new( - conductor: Conductor, - ssh_proxy_port: u16, - ssh_proxy_display_port: u16, - static_dir: Option>, - ) -> Self { - let github_client = GithubClient::new(); - let key = conductor.db.load_api_auth_token_key().await; - let auth = Auth::new(&conductor.db).await; - let certs = load_certs(&conductor.db).await.unwrap_or_default(); - - let hyper_client: HyperClient = - hyper_util::client::legacy::Client::<(), ()>::builder(TokioExecutor::new()) - .build(HttpConnector::new()); - - let state = Self { - db: conductor.db.clone(), - conductor, - github_client, - auth: Arc::new(auth), - auth_token_key: Arc::new(key), - certs: Arc::new(std::sync::RwLock::new(Arc::new(certs))), - ssh_proxy_port, - ssh_proxy_display_port, - hyper_client: Arc::new(hyper_client), - static_dir: Arc::new(static_dir), - }; - - { - let state = state.clone(); - tokio::spawn(async move { - if let Err(e) = state.monitor_config_updates().await { - tracing::error!("api monitor config updates error: {e}"); - } - }); - } - - state - } - - async fn monitor_config_updates(&self) -> Result<()> { - let pool = self - .db - .pool - .clone() - .ok_or_else(|| anyhow!("db doesn't have pg pool"))?; - let mut listener = sqlx::postgres::PgListener::connect_with(&pool).await?; - listener.listen("config_update").await?; - loop { - let notification = listener.recv().await?; - if let Err(e) = self.handle_config_update_notification(notification).await { - tracing::error!("handle config update notification error: {e:#}"); - } - } - } - - async fn handle_config_update_notification(&self, notification: PgNotification) -> Result<()> { - let payload: ConfigUpdatePayload = serde_json::from_str(notification.payload()) - .with_context(|| format!("trying to deserialize payload {}", notification.payload()))?; - if payload.name == AuthConfig::GITHUB.client_id - || payload.name == AuthConfig::GITHUB.client_secret - || payload.name == 
AuthConfig::GITLAB.client_id - || payload.name == AuthConfig::GITLAB.client_secret - { - self.auth.resync(&self.db).await; - } else if payload.name == LAPDEV_ENTERPRISE_LICENSE { - self.conductor.enterprise.license.resync_license().await; - } else if payload.name == LAPDEV_CPU_OVERCOMMIT { - if let Ok(v) = payload.value.parse::() { - *self.conductor.cpu_overcommit.write().await = v.max(1); - } - } else if payload.name == LAPDEV_BASE_HOSTNAME { - *self.conductor.hostnames.write().await = self - .conductor - .enterprise - .get_hostnames() - .await - .unwrap_or_default(); - } else if payload.name == LAPDEV_CERTS { - if let Ok(certs) = load_certs(&self.db).await { - if let Ok(mut current) = self.certs.write() { - *current = Arc::new(certs); - } - } - } - Ok(()) - } - - fn cookie_token(&self, cookie: &headers::Cookie, name: &str) -> Result { - let token = cookie.get(name).ok_or(ApiError::Unauthenticated)?; - let untrusted_token = - UntrustedToken::try_from(token).map_err(|_| ApiError::Unauthenticated)?; - let token = pasetors::local::decrypt( - &self.auth_token_key, - &untrusted_token, - &ClaimsValidationRules::new(), - None, - None, - ) - .map_err(|_| ApiError::Unauthenticated)?; - Ok(token) - } - - pub fn auth_state_token(&self, cookie: &headers::Cookie) -> Result { - self.cookie_token(cookie, OAUTH_STATE_COOKIE) - } - - pub fn token(&self, cookie: &headers::Cookie) -> Result { - self.cookie_token(cookie, TOKEN_COOKIE_NAME) - } - - pub async fn require_enterprise(&self) -> Result<(), ApiError> { - if self.conductor.enterprise.has_valid_license().await { - return Ok(()); - } - Err(ApiError::EnterpriseInvalid) - } - - pub async fn authenticate( - &self, - cookie: &headers::Cookie, - ) -> Result { - let token = self.token(cookie)?; - let user_id: Uuid = token - .payload_claims() - .and_then(|c| c.get_claim("user_id")) - .and_then(|v| serde_json::from_value(v.to_owned()).ok()) - .ok_or(ApiError::Unauthenticated)?; - let user = entities::user::Entity::find_by_id(user_id) - 
.filter(entities::user::Column::DeletedAt.is_null()) - .one(&self.db.conn) - .await? - .ok_or(ApiError::Unauthenticated)?; - Ok(user) - } - - pub async fn authenticate_cluster_admin( - &self, - cookie: &headers::Cookie, - ) -> Result { - let user = self.authenticate(cookie).await?; - if !user.cluster_admin { - return Err(ApiError::Unauthorized); - } - Ok(user) - } - - pub async fn get_project( - &self, - cookie: &headers::Cookie, - org_id: Uuid, - project_id: Uuid, - ) -> Result<(entities::user::Model, entities::project::Model), ApiError> { - let user = self.authenticate(cookie).await?; - self.db - .get_organization_member(user.id, org_id) - .await - .map_err(|_| ApiError::Unauthorized)?; - let project = self - .db - .get_project(project_id) - .await - .map_err(|_| ApiError::InvalidRequest("project doesn't exist".to_string()))?; - if project.organization_id != org_id { - return Err(ApiError::Unauthorized); - } - Ok((user, project)) - } -} - -async fn load_certs(db: &DbApi) -> Result>> { - let certs = db.get_config(LAPDEV_CERTS).await?; - let certs: Vec<(String, String)> = serde_json::from_str(&certs)?; - - let mut final_certs = HashMap::new(); - for (cert, key) in certs { - if let Ok((dns_names, cert)) = load_cert(&cert, &key) { - for dns_name in dns_names { - final_certs.insert(dns_name, Arc::new(cert.clone())); - } - } - } - Ok(final_certs) -} diff --git a/lapdev-common/src/utils.rs b/lapdev-common/src/utils.rs deleted file mode 100644 index 534ab07..0000000 --- a/lapdev-common/src/utils.rs +++ /dev/null @@ -1,37 +0,0 @@ -use std::time::{SystemTime, UNIX_EPOCH}; - -use rand::{distributions::Alphanumeric, Rng}; -use sha2::{Digest, Sha256}; - -pub fn rand_string(n: usize) -> String { - rand::thread_rng() - .sample_iter(&Alphanumeric) - .take(n) - .map(|i| i as char) - .collect::() - .to_lowercase() -} - -pub fn sha256(input: &str) -> String { - let hash = Sha256::digest(input.as_bytes()); - base16ct::lower::encode_string(&hash) -} - -pub fn format_repo_url(repo: 
&str) -> String { - let repo = repo.trim().to_lowercase(); - let repo = if !repo.starts_with("http://") - && !repo.starts_with("https://") - && !repo.starts_with("ssh://") - { - format!("https://{repo}") - } else { - repo.to_string() - }; - repo.strip_suffix('/') - .map(|r| r.to_string()) - .unwrap_or(repo) -} - -pub fn unix_timestamp() -> anyhow::Result { - Ok(SystemTime::now().duration_since(UNIX_EPOCH)?.as_secs()) -} diff --git a/lapdev-dashboard/Cargo.toml b/lapdev-dashboard/Cargo.toml deleted file mode 100644 index f63f612..0000000 --- a/lapdev-dashboard/Cargo.toml +++ /dev/null @@ -1,21 +0,0 @@ -[package] -name = "lapdev-dashboard" -version.workspace = true -authors.workspace = true -edition.workspace = true - -[dependencies] -gloo-net = "0.5.0" -wasm-bindgen = "0.2.88" -web-sys = "0.3.65" -urlencoding = "2.1.3" -leptos = { version = "0.6.5", features = ["csr"] } -leptos_router = { version = "0.6.5", features = ["csr"] } -rust_decimal.workspace = true -chrono.workspace = true -uuid.workspace = true -serde.workspace = true -serde_json.workspace = true -anyhow.workspace = true -futures.workspace = true -lapdev-common.workspace = true diff --git a/lapdev-dashboard/src/account.rs b/lapdev-dashboard/src/account.rs deleted file mode 100644 index 22f05cd..0000000 --- a/lapdev-dashboard/src/account.rs +++ /dev/null @@ -1,238 +0,0 @@ -use anyhow::Result; -use gloo_net::http::Request; -use lapdev_common::{ - console::{MeUser, NewSessionResponse}, - AuthProvider, ClusterInfo, -}; -use leptos::{ - component, create_action, create_local_resource, expect_context, use_context, view, window, - IntoView, Resource, RwSignal, Signal, SignalGet, SignalGetUntracked, SignalSet, SignalWith, -}; -use leptos_router::use_params_map; - -use crate::{cluster::OauthSettings, modal::ErrorResponse}; - -pub async fn get_login() -> Result { - let resp: MeUser = Request::get("/api/private/me").send().await?.json().await?; - Ok(resp) -} - -async fn now_login(provider: AuthProvider) -> 
Result<()> { - let location = window().window().location(); - let next = location.href().unwrap_or_default(); - let next = urlencoding::encode(&next).to_string(); - let resp = Request::get("/api/private/session") - .query([ - ("provider", &provider.to_string()), - ("next", &next), - ("host", &location.origin().unwrap_or_default()), - ]) - .send() - .await?; - let resp: NewSessionResponse = resp.json().await?; - let _ = window().location().set_href(&resp.url); - - Ok(()) -} - -async fn now_logout() { - let _ = Request::delete("/api/private/session").send().await; - let _ = window().location().set_href("/"); -} - -#[component] -pub fn Login() -> impl IntoView { - let cluster_info = expect_context::>>(); - - view! { -
    -
    - - - - - - - Lapdev - - { - move || if let Some(auth_providers) = cluster_info.with(|i| i.as_ref().map(|i| i.auth_providers.clone())) { - if !auth_providers.is_empty() { - view! { - - }.into_view() - } else { - view! { -
    - -
    - }.into_view() - } - } else { - view! { - - }.into_view() - } - } -
    -
    - } -} - -#[component] -pub fn LoginWithView(auth_providers: Vec) -> impl IntoView { - let login = use_context::>>().unwrap(); - view! { -
    -

    - Sign in to your account -

    -
    -
    - - - -
    -
    -
    - } -} - -#[component] -pub fn InitAuthProvidersView() -> impl IntoView { - view! { - - } -} - -#[component] -pub fn NavUserControl(user_control_hidden: RwSignal) -> impl IntoView { - let login = use_context::>>().unwrap(); - - let logout = move |_| { - user_control_hidden.set(true); - create_action(move |_| now_logout()).dispatch(()); - }; - - view! { -
    -
    - - { move || login.get().flatten().and_then(|l| l.name).unwrap_or("".to_string()) } - - - { move || login.get().flatten().and_then(|l| l.email).unwrap_or("".to_string()) } - -
    - - -
    - } -} - -#[component] -pub fn AccountSettings() -> impl IntoView { - view! { -
    -
    - User Settings -
    -

    {"Manage your account settings"}

    -
    - } -} - -async fn join_org(invitation_id: String) -> Result> { - let resp = Request::put(&format!("/api/v1/join/{invitation_id}")) - .send() - .await?; - if resp.status() != 204 { - let error = resp - .json::() - .await - .unwrap_or_else(|_| ErrorResponse { - error: "Internal Server Error".to_string(), - }); - return Ok(Some(error)); - } - let _ = window().location().set_href("/"); - Ok(None) -} - -#[component] -pub fn JoinView() -> impl IntoView { - let params = use_params_map(); - let id = Signal::derive(move || { - params - .with(|params| params.get("id").cloned()) - .unwrap_or_default() - }); - - let result = { - create_local_resource( - || (), - move |_| async move { join_org(id.get_untracked()).await }, - ) - }; - - view! { - { - move || if let Some(error) = result.with(|r| r.as_ref().and_then(|r| r.as_ref().ok().cloned())).flatten() { - view! { -
    - { error.error } -
    - }.into_view() - } else { - view! {}.into_view() - } - } - } -} diff --git a/lapdev-dashboard/src/app.rs b/lapdev-dashboard/src/app.rs deleted file mode 100644 index b270a4c..0000000 --- a/lapdev-dashboard/src/app.rs +++ /dev/null @@ -1,157 +0,0 @@ -use anyhow::Result; -use gloo_net::http::Request; -use lapdev_common::{console::MeUser, ClusterInfo}; -use leptos::{ - component, create_local_resource, create_rw_signal, provide_context, use_context, view, window, - IntoView, Resource, Show, Signal, SignalGet, -}; -use leptos_router::{Route, Router, Routes}; - -use crate::{ - account::{get_login, AccountSettings, JoinView, Login}, - audit_log::AuditLogView, - cluster::{ClusterSettings, ClusterUsersView, MachineTypeView, WorkspaceHostView}, - git_provider::GitProviderView, - license::{LicenseView, SignLicenseView}, - nav::{AdminSideNav, NavExpanded, SideNav, TopNav}, - organization::{NewOrgModal, OrgMembers, OrgSettings}, - project::{ProjectDetails, Projects}, - quota::QuotaView, - ssh_key::SshKeys, - usage::UsageView, - workspace::{WorkspaceDetails, Workspaces}, -}; - -#[derive(Clone)] -pub struct AppConfig { - pub show_lapdev_website: bool, -} - -#[component] -fn Root() -> impl IntoView { - view! 
{ - - } -} - -async fn get_cluster_info() -> Result { - let resp = Request::get("/api/v1/cluster_info").send().await?; - let info: ClusterInfo = resp.json().await?; - Ok(info) -} - -pub fn set_context() { - let login_counter = create_rw_signal(0); - provide_context(login_counter); - - let login = create_local_resource( - move || login_counter.get(), - |_| async move { get_login().await.ok() }, - ); - provide_context(login); - - let cluster_info = - create_local_resource(move || (), |_| async move { get_cluster_info().await.ok() }); - let cluster_info = Signal::derive(move || cluster_info.get().flatten()); - provide_context(cluster_info); - - let current_org = Signal::derive(move || { - let login = login.get().flatten(); - login.map(|u| u.organization) - }); - provide_context(current_org); - - let pathname = window().location().pathname().unwrap_or_default(); - let nav_expanded = NavExpanded { - orgnization: create_rw_signal(pathname.starts_with("/organization")), - account: create_rw_signal(pathname.starts_with("/account")), - }; - provide_context(nav_expanded); - - provide_context(create_rw_signal(AppConfig { - show_lapdev_website: false, - })); -} - -#[component] -pub fn App() -> impl IntoView { - set_context(); - view! { - - - } /> - } /> - } /> - } /> - } /> - } /> - } /> - } /> - } /> - } /> - } /> - } /> - } /> - } /> - } /> - } /> - } /> - } /> - } /> - } /> - } /> - - - - } -} - -#[component] -pub fn WrappedView(element: T) -> impl IntoView -where - T: IntoView + Copy + 'static, -{ - let login = use_context::>>().unwrap(); - let new_org_modal_hidden = create_rw_signal(true); - view! { - } - > -
    - -
    - -
    - {element} -
    -
    - -
    -
    - } -} - -#[component] -pub fn AdminWrappedView(element: T) -> impl IntoView -where - T: IntoView + Copy + 'static, -{ - let login = use_context::>>().unwrap(); - view! { - } - > -
    - -
    - -
    - {element} -
    -
    -
    -
    - } -} diff --git a/lapdev-dashboard/src/lib.rs b/lapdev-dashboard/src/lib.rs deleted file mode 100644 index e63534c..0000000 --- a/lapdev-dashboard/src/lib.rs +++ /dev/null @@ -1,15 +0,0 @@ -pub mod account; -pub mod app; -pub mod audit_log; -pub mod cluster; -pub mod datepicker; -pub mod git_provider; -pub mod license; -pub mod modal; -pub mod nav; -pub mod organization; -pub mod project; -pub mod quota; -pub mod ssh_key; -pub mod usage; -pub mod workspace; diff --git a/lapdev-dashboard/src/main.rs b/lapdev-dashboard/src/main.rs deleted file mode 100644 index 275c129..0000000 --- a/lapdev-dashboard/src/main.rs +++ /dev/null @@ -1,11 +0,0 @@ -use lapdev_dashboard::app::App; -use leptos::{mount_to_body, view}; - -pub fn main() { - leptos::document().body().unwrap().set_inner_html(""); - mount_to_body(move || { - view! { - - } - }) -} diff --git a/lapdev-dashboard/src/modal.rs b/lapdev-dashboard/src/modal.rs deleted file mode 100644 index 772a182..0000000 --- a/lapdev-dashboard/src/modal.rs +++ /dev/null @@ -1,399 +0,0 @@ -use anyhow::Result; -use chrono::{DateTime, FixedOffset}; -use leptos::{ - component, create_effect, create_rw_signal, event_target_value, view, Action, IntoView, - RwSignal, SignalGet, SignalSet, SignalUpdate, SignalWith, View, -}; -use serde::Deserialize; - -#[derive(Debug, Deserialize, Clone)] -pub struct ErrorResponse { - pub error: String, -} - -impl From for ErrorResponse -where - E: Into, -{ - fn from(err: E) -> Self { - let err: anyhow::Error = err.into(); - Self { - error: err.to_string(), - } - } -} - -#[component] -pub fn CreationInput(label: String, value: RwSignal, placeholder: String) -> impl IntoView { - view! { -
    - - -
    - } -} - -#[component] -pub fn CreationModal( - title: String, - modal_hidden: RwSignal, - action: Action<(), Result<(), ErrorResponse>>, - body: T, - update_text: Option, - updating_text: Option, - create_button_hidden: Box bool + 'static>, - width_class: Option, -) -> impl IntoView -where - T: IntoView + 'static, -{ - let error = create_rw_signal(None); - let handle_create = move |_| { - error.set(None); - action.dispatch(()); - }; - let create_pending = action.pending(); - create_effect(move |_| { - modal_hidden.track(); - error.set(None); - }); - create_effect(move |_| { - action.value().with(|result| { - if let Some(result) = result { - match result { - Ok(_) => {} - Err(e) => { - error.set(Some(e.error.clone())); - } - } - } - }) - }); - - let width_class = if let Some(width_class) = width_class { - width_class - } else { - "max-w-2xl".to_string() - }; - view! { -
    -
    -
    -
    -

    - { title } -

    - -
    - { move || if let Some(error) = error.get() { - view! { -
    -
    - { error } -
    -
    - }.into_view() - } else { - view! {}.into_view() - } - } -
    - {body} -
    -
    - - -
    -
    -
    -
    - } -} - -#[component] -pub fn DeletionModal( - resource: String, - modal_hidden: RwSignal, - delete_action: Action<(), Result<(), ErrorResponse>>, -) -> impl IntoView { - let error = create_rw_signal(None); - let handle_delete = move |_| { - delete_action.dispatch(()); - }; - let delete_pending = delete_action.pending(); - create_effect(move |_| { - delete_action.value().with(|result| { - if let Some(result) = result { - match result { - Ok(_) => {} - Err(e) => { - error.set(Some(e.error.clone())); - } - } - } - }) - }); - - create_effect(move |_| { - if modal_hidden.get() { - error.set(None); - } - }); - - view! { -
    -
    -
    - -
    - -

    - Are you sure you want to delete - { resource } -

    - { move || if let Some(error) = error.get() { - view! { -
    - { error } -
    - }.into_view() - } else { - view! {}.into_view() - } - } - - -
    -
    -
    -
    - } -} - -#[component] -pub fn DatetimeModal(time: DateTime) -> impl IntoView { - let offset = web_sys::js_sys::Date::new_0().get_timezone_offset(); - let time = if let Some(offset) = FixedOffset::east_opt((offset * 60.0) as i32) { - time.with_timezone(&offset) - } else { - time - }; - - let hidden = create_rw_signal(true); - let duration = chrono::Utc::now() - time.with_timezone(&chrono::Utc); - let days = duration.num_days(); - let formatted = move || { - if days > 365 { - let unit = days / 365; - format!("{} year{} ago", unit, if unit > 1 { "s" } else { "" }) - } else if days > 30 { - let unit = days / 30; - format!("{} month{} ago", unit, if unit > 1 { "s" } else { "" }) - } else if days > 7 { - let unit = days / 7; - format!("{} week{} ago", unit, if unit > 1 { "s" } else { "" }) - } else if days > 0 { - let unit = days; - format!("{} day{} ago", unit, if unit > 1 { "s" } else { "" }) - } else { - let hours = duration.num_hours(); - if hours > 0 { - format!("{hours} hour{} ago", if hours > 1 { "s" } else { "" }) - } else { - let minutes = duration.num_minutes(); - if minutes > 0 { - format!("{minutes} minute{} ago", if minutes > 1 { "s" } else { "" }) - } else { - format!("{} seconds ago", duration.num_seconds()) - } - } - } - }; - view! { -
    -
    -
    - { format!("{}", time.format("%Y-%m-%d %H:%M:%S")) } -
    -
    - { formatted } -
    - } -} - -#[component] -pub fn SettingView( - title: String, - action: Action<(), Result<(), ErrorResponse>>, - body: T, - update_counter: RwSignal, - extra: Option, -) -> impl IntoView -where - T: IntoView + 'static, -{ - let success = create_rw_signal(None); - let error = create_rw_signal(None); - let handle_save = move |_| { - success.set(None); - error.set(None); - action.dispatch(()); - }; - let save_pending = action.pending(); - - create_effect(move |_| { - action.value().with(|result| { - if let Some(result) = result { - match result { - Ok(_) => { - update_counter.update(|c| *c += 1); - success.set(Some("Saved successfully")); - } - Err(e) => { - error.set(Some(e.error.clone())); - } - } - } - }) - }); - - view! { - { - if title.is_empty() { - view!{}.into_view() - } else { - view!{
    {title}
    }.into_view() - } - } - { body } -
    - - { - if let Some(extra) = extra { - extra.into_view() - } else { - view!{}.into_view() - } - } - { move || if let Some(error) = error.get() { - view! { -
    - { error } -
    - }.into_view() - } else { - view! {}.into_view() - } - } - { move || if let Some(success) = success.get() { - view! { -
    - { success } -
    - }.into_view() - } else { - view! {}.into_view() - } - } -
    - } -} diff --git a/lapdev-dashboard/src/organization.rs b/lapdev-dashboard/src/organization.rs deleted file mode 100644 index 24c59ed..0000000 --- a/lapdev-dashboard/src/organization.rs +++ /dev/null @@ -1,1038 +0,0 @@ -use std::{str::FromStr, time::Duration}; - -use anyhow::{anyhow, Result}; -use gloo_net::http::Request; -use lapdev_common::{ - console::{MeUser, Organization, OrganizationMember}, - ClusterInfo, NewOrganization, UpdateOrganizationAutoStartStop, UpdateOrganizationMember, - UpdateOrganizationName, UserRole, -}; -use leptos::{ - component, create_action, create_effect, create_local_resource, create_rw_signal, document, - event_target_checked, event_target_value, expect_context, leptos_dom::logging::console_log, - set_timeout, use_context, view, window, Action, For, IntoView, Resource, RwSignal, Signal, - SignalGet, SignalGetUntracked, SignalSet, SignalUpdate, SignalWith, -}; -use uuid::Uuid; -use wasm_bindgen::{JsCast, UnwrapThrowExt}; -use web_sys::FocusEvent; - -use crate::modal::{CreationModal, DatetimeModal, DeletionModal, ErrorResponse, SettingView}; - -async fn create_org(name: RwSignal) -> Result<()> { - let org_name = name.get_untracked(); - let resp = Request::post("/api/v1/organizations") - .json(&NewOrganization { name: org_name })? 
- .send() - .await?; - if resp.status() != 200 { - return Err(anyhow!("can't create organization")); - } - let _resp: Organization = resp.json().await?; - - let _ = window().location().set_href("/"); - - Ok(()) -} - -async fn switch_org(org_id: String) -> Result<()> { - let _ = Request::put(&format!("/api/private/me/organization/{org_id}")) - .send() - .await; - let _ = window().location().set_href("/"); - Ok(()) -} - -#[component] -pub fn OrgSelector(new_org_modal_hidden: RwSignal) -> impl IntoView { - let login = use_context::>>().unwrap(); - let hidden = create_rw_signal(true); - let toggle_dropdown = move |_| { - if hidden.get_untracked() { - hidden.set(false); - } else { - hidden.set(true); - } - }; - - let on_focusout = move |e: FocusEvent| { - let node = e - .current_target() - .unwrap_throw() - .unchecked_into::(); - - set_timeout( - move || { - let has_focus = if let Some(active) = document().active_element() { - let active: web_sys::Node = active.into(); - node.contains(Some(&active)) - } else { - false - }; - if !has_focus && !hidden.get_untracked() { - hidden.set(true); - } - }, - Duration::from_secs(0), - ); - }; - - view! { -
    - - -
    - } -} - -#[component] -pub fn NewOrgModal(modal_hidden: RwSignal) -> impl IntoView { - let org_name = create_rw_signal("".to_string()); - let action = create_action(move |_| create_org(org_name)); - let handle_create_org = move |_| { - action.dispatch(()); - }; - let create_pending = action.pending(); - view! { - - } -} - -async fn delete_org() -> Result> { - let org = - use_context::>>().ok_or_else(|| anyhow!("can't get org"))?; - let org = org - .get_untracked() - .ok_or_else(|| anyhow!("can't get org"))?; - let resp = Request::delete(&format!("/api/v1/organizations/{}", org.id)) - .send() - .await?; - if resp.status() == 200 { - let _ = window().location().set_href("/"); - } else { - let error = resp - .json::() - .await - .unwrap_or_else(|_| ErrorResponse { - error: "Internal Server Error".to_string(), - }); - return Ok(Some(error)); - } - Ok(None) -} - -#[component] -pub fn OrgSettings() -> impl IntoView { - let modal_hidden = create_rw_signal(true); - let delete_action = create_action(|()| delete_org()); - let login = use_context::>>().unwrap(); - let cluster_info = expect_context::>>(); - view! { -
    -
    - Organization Settings -
    -

    {"Manage your organization's settings"}

    -
    -
    -
    - -
    -
    - -
    - -
    -
    - Delete Organization -
    - -
    -
    - - } -} - -#[component] -pub fn DeleteOrgModal( - modal_hidden: RwSignal, - delete_action: Action<(), Result>>, -) -> impl IntoView { - let error = create_rw_signal(None); - let confirmation = create_rw_signal(String::new()); - let handle_delete = move |_| { - delete_action.dispatch(()); - }; - let delete_pending = delete_action.pending(); - let org = use_context::>>().unwrap(); - create_effect(move |_| { - delete_action.value().with(|result| { - if let Some(result) = result { - match result { - Ok(err) => { - if let Some(err) = err { - error.set(Some(err.error.to_string())); - } - } - Err(e) => { - console_log(&format!("creation error: {e}")); - } - } - } - }) - }); - view! { -
    -
    -
    - -
    - -

    -

    Are you sure you want to delete organization

    -

    { move || org.get().map(|org| org.name).unwrap_or_default() }

    -

    - { move || if let Some(error) = error.get() { - view! { -
    - { error } -
    - }.into_view() - } else { - view! {}.into_view() - } - } -
    -

    {"1. You'll lose all the projects and workspaces in this organization and this cannot be restored."}

    -

    {"2. All organization members will lose access to this organization and all the associated workspaces."}

    -
    -

    - Type - { move || org.get().map(|org| org.name).unwrap_or_default() } - To Confirm -

    - - - -
    -
    -
    -
    - } -} - -async fn update_name(id: Uuid, name: String) -> Result<(), ErrorResponse> { - let resp = Request::put(&format!("/api/v1/organizations/{id}/name")) - .json(&UpdateOrganizationName { name })? - .send() - .await?; - - if resp.status() != 204 { - let error = resp - .json::() - .await - .unwrap_or_else(|_| ErrorResponse { - error: "Internal Server Error".to_string(), - }); - return Err(error); - } - - Ok(()) -} - -async fn update_auto_start_stop( - id: Uuid, - auto_start: bool, - allow_workspace_change_auto_start: bool, - auto_stop: Option, - allow_workspace_change_auto_stop: bool, -) -> Result<(), ErrorResponse> { - let resp = Request::put(&format!("/api/v1/organizations/{id}/auto_start_stop")) - .json(&UpdateOrganizationAutoStartStop { - auto_start, - auto_stop, - allow_workspace_change_auto_start, - allow_workspace_change_auto_stop, - })? - .send() - .await?; - - if resp.status() != 204 { - let error = resp - .json::() - .await - .unwrap_or_else(|_| ErrorResponse { - error: "Internal Server Error".to_string(), - }); - return Err(error); - } - - Ok(()) -} - -#[component] -fn AutoStartStopView() -> impl IntoView { - let org = use_context::>>().unwrap(); - let login_counter = expect_context::>(); - - let auto_start_enabled = create_rw_signal(false); - create_effect(move |_| { - if let Some(enabled) = org.with(|o| o.as_ref().map(|o| o.auto_start)) { - auto_start_enabled.set(enabled); - } - }); - - let allow_workspace_change_auto_start = create_rw_signal(false); - create_effect(move |_| { - if let Some(enabled) = org.with(|o| o.as_ref().map(|o| o.allow_workspace_change_auto_start)) - { - allow_workspace_change_auto_start.set(enabled); - } - }); - - let auto_stop_enabled = create_rw_signal(false); - create_effect(move |_| { - if let Some(enabled) = org.with(|o| o.as_ref().map(|o| o.auto_stop.is_some())) { - auto_stop_enabled.set(enabled); - } - }); - - let auto_stop_seconds = create_rw_signal(3600); - create_effect(move |_| { - if let Some(seconds) = 
org.with(|o| o.as_ref().map(|o| o.auto_stop)).flatten() { - auto_stop_seconds.set(seconds); - } - }); - - let allow_workspace_change_auto_stop = create_rw_signal(false); - create_effect(move |_| { - if let Some(enabled) = org.with(|o| o.as_ref().map(|o| o.allow_workspace_change_auto_stop)) - { - allow_workspace_change_auto_stop.set(enabled); - } - }); - - let save_action = create_action(move |_| async move { - if let Some(id) = org.with(|o| o.as_ref().map(|o| o.id)) { - update_auto_start_stop( - id, - auto_start_enabled.get_untracked(), - allow_workspace_change_auto_start.get_untracked(), - if auto_stop_enabled.get_untracked() { - Some(auto_stop_seconds.get_untracked()) - } else { - None - }, - allow_workspace_change_auto_stop.get_untracked(), - ) - .await - } else { - Err(ErrorResponse { - error: "Organization not loaded yet".to_string(), - }) - } - }); - - let body = view! { -
    - -
    -
    - -
    -
    - -
    -
    - -
    - { - move || if auto_stop_enabled.get() { - view! { -
    - - -
    - }.into_view() - } else { - view! {}.into_view() - } - } - }; - - view! { - - } -} - -#[component] -fn UpdateNameView() -> impl IntoView { - let org = use_context::>>().unwrap(); - let login_counter = expect_context::>(); - - let org_name = create_rw_signal(String::new()); - create_effect(move |_| { - if let Some(name) = org.with(|o| o.as_ref().map(|o| o.name.clone())) { - org_name.set(name); - } - }); - - let save_action = create_action(move |_| async move { - if let Some(id) = org.with(|o| o.as_ref().map(|o| o.id)) { - update_name(id, org_name.get_untracked()).await - } else { - Err(ErrorResponse { - error: "Organization not loaded yet".to_string(), - }) - } - }); - - let body = view! { -
    - -
    - }; - - view! { - - } -} - -async fn get_org_members() -> Result> { - let org = - use_context::>>().ok_or_else(|| anyhow!("can't get org"))?; - let org = org - .get_untracked() - .ok_or_else(|| anyhow!("can't get org"))?; - let resp = Request::get(&format!("/api/v1/organizations/{}/members", org.id)) - .send() - .await?; - let members: Vec = resp.json().await?; - Ok(members) -} - -#[component] -pub fn OrgMembers() -> impl IntoView { - let invite_member_modal_hidden = create_rw_signal(true); - let member_filter = create_rw_signal(String::new()); - - let update_counter = create_rw_signal(0); - let members = create_local_resource( - move || update_counter.get(), - move |_| async move { get_org_members().await }, - ); - let members = Signal::derive(move || { - let filter = member_filter.get(); - let mut members = members - .with(|m| m.as_ref().and_then(|m| m.as_ref().ok().cloned())) - .unwrap_or_default(); - if !filter.trim().is_empty() { - members.retain(|m| { - m.name - .as_ref() - .map(|n| n.contains(&filter)) - .unwrap_or(false) - }); - } - members - }); - - view! { -
    -
    - Organization Members -
    -

    {"Manage your organization's members"}

    -
    -
    -
    - -
    -
    - -
    - -
    -
    -
    - -
    -
    - -
    - Name - Joined - -
    - Role -
    - - -
    -
    - - - } - } - /> - - - } -} - -async fn delete_org_member( - user_id: Uuid, - delete_modal_hidden: RwSignal, - update_counter: RwSignal, -) -> Result<(), ErrorResponse> { - let current_org = - use_context::>>().ok_or_else(|| anyhow!("can't get org"))?; - let org = current_org - .get_untracked() - .ok_or_else(|| anyhow!("can't get org"))?; - let resp = Request::delete(&format!( - "/api/v1/organizations/{}/members/{user_id}", - org.id - )) - .send() - .await?; - if resp.status() != 204 { - let error = resp - .json::() - .await - .unwrap_or_else(|_| ErrorResponse { - error: "Internal Server Error".to_string(), - }); - return Err(error); - } - delete_modal_hidden.set(true); - update_counter.update(|c| *c += 1); - Ok(()) -} - -async fn update_org_member( - user_id: Uuid, - role: String, - update_modal_hidden: RwSignal, - update_counter: RwSignal, -) -> Result<(), ErrorResponse> { - let current_org = - use_context::>>().ok_or_else(|| anyhow!("can't get org"))?; - let org = current_org - .get_untracked() - .ok_or_else(|| anyhow!("can't get org"))?; - - let role: UserRole = match UserRole::from_str(&role) { - Ok(r) => r, - Err(_) => { - return Err(ErrorResponse { - error: "role is invalid".to_string(), - }) - } - }; - - let resp = Request::put(&format!( - "/api/v1/organizations/{}/members/{user_id}", - org.id - )) - .json(&UpdateOrganizationMember { role })? 
- .send() - .await?; - if resp.status() != 200 { - let error = resp - .json::() - .await - .unwrap_or_else(|_| ErrorResponse { - error: "Internal Server Error".to_string(), - }); - return Err(error); - } - update_modal_hidden.set(true); - update_counter.update(|c| *c += 1); - Ok(()) -} - -#[component] -fn MemberItemView( - i: usize, - member: OrganizationMember, - update_counter: RwSignal, -) -> impl IntoView { - let update_member_modal_hidden = create_rw_signal(true); - let delete_modal_hidden = create_rw_signal(true); - let delete_action = create_action(move |_| { - delete_org_member(member.user_id, delete_modal_hidden, update_counter) - }); - - view! { -
    0) - > - - user photo - {member.name.clone()} - -
    - -
    - -
    - {member.role.to_string()} -
    - - - -
    -
    - - - } -} - -#[component] -fn UpdateMemberView( - member: OrganizationMember, - update_modal_hidden: RwSignal, - update_counter: RwSignal, -) -> impl IntoView { - let role = create_rw_signal(member.role.to_string()); - let update_action = create_action(move |_| { - update_org_member( - member.user_id, - role.get_untracked(), - update_modal_hidden, - update_counter, - ) - }); - let select_on_change = move |ev: web_sys::Event| { - role.set(event_target_value(&ev)); - }; - let body = view! { -
    - - -
    - }; - view! { - - } -} - -#[component] -fn MemberControl( - delete_modal_hidden: RwSignal, - update_member_modal_hidden: RwSignal, - align_right: bool, -) -> impl IntoView { - let dropdown_hidden = create_rw_signal(true); - - let toggle_dropdown = move |_| { - if dropdown_hidden.get_untracked() { - dropdown_hidden.set(false); - } else { - dropdown_hidden.set(true); - } - }; - - let on_focusout = move |e: FocusEvent| { - let node = e - .current_target() - .unwrap_throw() - .unchecked_into::(); - - set_timeout( - move || { - let has_focus = if let Some(active) = document().active_element() { - let active: web_sys::Node = active.into(); - node.contains(Some(&active)) - } else { - false - }; - if !has_focus && !dropdown_hidden.get_untracked() { - dropdown_hidden.set(true); - } - }, - Duration::from_secs(0), - ); - }; - - let delete_member = { - move |_| { - dropdown_hidden.set(true); - delete_modal_hidden.set(false); - } - }; - - view! { -
    - - -
    - } -} - -async fn create_user_invitation() -> Result { - let org = - use_context::>>().ok_or_else(|| anyhow!("can't get org"))?; - let org = org - .get_untracked() - .ok_or_else(|| anyhow!("can't get org"))?; - let resp = Request::post(&format!("/api/v1/organizations/{}/invitations", org.id)) - .send() - .await?; - if resp.status() != 200 { - return Err(anyhow!(resp.text().await?)); - } - let invitation: String = resp.text().await?; - Ok(invitation) -} - -#[component] -fn InviteMemberView(invite_member_modal_hidden: RwSignal) -> impl IntoView { - let action = create_action(move |_| async move { Ok(()) }); - - let update_counter = create_rw_signal(0); - let invitation_resource = create_local_resource( - move || update_counter.get(), - move |_| async move { create_user_invitation().await }, - ); - let invitation = create_rw_signal(String::new()); - create_effect(move |_| { - let hidden = invite_member_modal_hidden.get(); - invitation.set(String::new()); - if !hidden { - update_counter.update(|c| *c += 1); - } - }); - - create_effect(move |_| { - if let Some(i) = - invitation_resource.with(|i| i.as_ref().and_then(|i| i.as_ref().ok().cloned())) - { - invitation.set(i); - } - }); - - let body = view! { -
    - - - { move || format!("{}/join/{}", window().location().origin().unwrap(), invitation.get()) } - - -
    - }; - view! { - - } -} diff --git a/lapdev-dashboard/src/ssh_key.rs b/lapdev-dashboard/src/ssh_key.rs deleted file mode 100644 index 70a5b9e..0000000 --- a/lapdev-dashboard/src/ssh_key.rs +++ /dev/null @@ -1,162 +0,0 @@ -use anyhow::Result; -use gloo_net::http::Request; -use lapdev_common::{NewSshKey, SshKey}; -use leptos::{ - component, create_action, create_local_resource, create_rw_signal, event_target_value, view, - For, IntoView, RwSignal, SignalGet, SignalGetUntracked, SignalSet, SignalUpdate, -}; - -use crate::modal::{CreationInput, CreationModal, DeletionModal, ErrorResponse}; - -async fn delete_ssh_key( - id: String, - delete_modal_hidden: RwSignal, - update_counter: RwSignal, -) -> Result<(), ErrorResponse> { - let resp = Request::delete(&format!("/api/v1/account/ssh_keys/{id}")) - .send() - .await?; - if resp.status() != 204 { - let error = resp - .json::() - .await - .unwrap_or_else(|_| ErrorResponse { - error: "Internal Server Error".to_string(), - }); - return Err(error); - } - delete_modal_hidden.set(true); - update_counter.update(|c| *c += 1); - Ok(()) -} - -#[component] -pub fn SshKeyItem(key: SshKey, update_counter: RwSignal) -> impl IntoView { - let id = key.id; - let name = key.name.clone(); - let delete_modal_hidden = create_rw_signal(true); - let delete_action = - create_action(move |_| delete_ssh_key(id.to_string(), delete_modal_hidden, update_counter)); - - view! { -
    - {key.name} - {key.key} - {key.created_at.to_rfc2822()} -
    - -
    - -
    - } -} - -async fn all_ssh_keys() -> Result> { - let resp = Request::get("/api/v1/account/ssh_keys").send().await?; - let ssh_keys: Vec = resp.json().await?; - Ok(ssh_keys) -} - -#[component] -pub fn SshKeys() -> impl IntoView { - let update_counter = create_rw_signal(0); - let ssh_keys = create_local_resource( - move || update_counter.get(), - |_| async move { all_ssh_keys().await.unwrap_or_default() }, - ); - - view! { -
    -
    -
    -
    -
    - SSH Keys -
    -

    {"Manage your SSH keys. Add your SSH public keys to your account will enable you to SSH into all your workspaces."}

    -
    - -
    -
    -
    -
    - - } - } - /> -
    -
    -
    - } -} - -async fn create_ssh_key( - name: RwSignal, - key: RwSignal, - modal_hidden: RwSignal, - update_counter: RwSignal, -) -> Result<(), ErrorResponse> { - let resp = Request::post("/api/v1/account/ssh_keys") - .json(&NewSshKey { - name: name.get_untracked(), - key: key.get_untracked(), - })? - .send() - .await?; - if resp.status() != 200 { - let error = resp - .json::() - .await - .unwrap_or_else(|_| ErrorResponse { - error: "Internal Server Error".to_string(), - }); - return Err(error); - } - - modal_hidden.set(true); - update_counter.update(|c| *c += 1); - name.set(String::new()); - key.set(String::new()); - - Ok(()) -} - -#[component] -pub fn NewSshKey(update_counter: RwSignal) -> impl IntoView { - let name = create_rw_signal(String::new()); - let key = create_rw_signal(String::new()); - - let modal_hidden = create_rw_signal(true); - let action = create_action(move |_| create_ssh_key(name, key, modal_hidden, update_counter)); - - let body = view! { - -
    - -