diff --git a/.github/actions/check-docker-image-changes/action.yml b/.github/actions/check-docker-image-changes/action.yml new file mode 100644 index 000000000..f76541e0f --- /dev/null +++ b/.github/actions/check-docker-image-changes/action.yml @@ -0,0 +1,80 @@ +name: Check Docker Image Changes +description: Determines if Docker image inputs have changed between current and base branch + +inputs: + base_ref: + description: 'Base branch ref for comparison (typically github.base_ref)' + required: false + default: '' + event_name: + description: 'GitHub event name (typically github.event_name)' + required: true + +outputs: + should_run: + description: 'Whether tests should run based on input changes' + value: ${{ steps.check.outputs.should_run }} + input_hash: + description: 'Current Docker image inputs hash' + value: ${{ steps.check.outputs.input_hash }} + base_hash: + description: 'Base branch Docker image inputs hash (empty if not a PR)' + value: ${{ steps.check.outputs.base_hash }} + +runs: + using: composite + steps: + - name: Get current Docker image inputs hash + id: current + shell: bash + run: | + HASH=$(nix run --accept-flake-config .#docker-image-inputs -- hash) + echo "hash=$HASH" >> "$GITHUB_OUTPUT" + echo "Current Docker image inputs hash: $HASH" + + - name: Get base branch Docker image inputs hash + id: base + if: inputs.event_name == 'pull_request' + shell: bash + run: | + # Fetch base branch + git fetch origin ${{ inputs.base_ref }} --depth=1 + + # Checkout base branch files temporarily + git checkout FETCH_HEAD -- . 2>/dev/null || true + + # Get hash from base branch + BASE_HASH=$(nix run --accept-flake-config .#docker-image-inputs -- hash 2>/dev/null || echo "") + + # Restore current branch + git checkout HEAD -- . + + echo "hash=$BASE_HASH" >> "$GITHUB_OUTPUT" + echo "Base branch Docker image inputs hash: $BASE_HASH" + + - name: Determine if tests should run + id: check + shell: bash + run: | + CURRENT_HASH="${{ steps.current.outputs.hash }}" + BASE_HASH="${{ steps.base.outputs.hash }}" + + echo "input_hash=$CURRENT_HASH" >> "$GITHUB_OUTPUT" + echo "base_hash=$BASE_HASH" >> "$GITHUB_OUTPUT" + + if [[ "${{ inputs.event_name }}" == "workflow_dispatch" ]]; then + echo "Workflow dispatch - running tests" + echo "should_run=true" >> "$GITHUB_OUTPUT" + elif [[ "${{ inputs.event_name }}" == "push" ]]; then + echo "Push to protected branch - running tests" + echo "should_run=true" >> "$GITHUB_OUTPUT" + elif [[ -z "$BASE_HASH" ]]; then + echo "Could not get base hash - running tests to be safe" + echo "should_run=true" >> "$GITHUB_OUTPUT" + elif [[ "$CURRENT_HASH" != "$BASE_HASH" ]]; then + echo "Docker image inputs changed ($BASE_HASH -> $CURRENT_HASH) - running tests" + echo "should_run=true" >> "$GITHUB_OUTPUT" + else + echo "Docker image inputs unchanged - skipping tests" + echo "should_run=false" >> "$GITHUB_OUTPUT" + fi diff --git a/.github/workflows/cli-smoke-test.yml b/.github/workflows/cli-smoke-test.yml new file mode 100644 index 000000000..4bac1d725 --- /dev/null +++ b/.github/workflows/cli-smoke-test.yml @@ -0,0 +1,112 @@ +name: CLI Smoke Test + +on: + pull_request: + types: [opened, reopened, synchronize] + push: + branches: + - develop + - release/* + workflow_dispatch: + +permissions: + id-token: write + contents: read + +jobs: + check-changes: + name: Check Docker Image Changes + runs-on: blacksmith-2vcpu-ubuntu-2404 + outputs: + should_run: ${{ steps.check.outputs.should_run }} + input_hash: ${{ steps.check.outputs.input_hash }} + base_hash: ${{ 
steps.check.outputs.base_hash }} + steps: + - name: Checkout Repo + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + + - name: Install nix + uses: ./.github/actions/nix-install-ephemeral + with: + push-to-cache: 'false' + env: + DEV_AWS_ROLE: ${{ secrets.DEV_AWS_ROLE }} + NIX_SIGN_SECRET_KEY: ${{ secrets.NIX_SIGN_SECRET_KEY }} + + - name: Check Docker image changes + id: check + uses: ./.github/actions/check-docker-image-changes + with: + event_name: ${{ github.event_name }} + base_ref: ${{ github.base_ref }} + + cli-smoke-test: + name: CLI Smoke Test (PG ${{ matrix.pg_version }}) + needs: check-changes + if: needs.check-changes.outputs.should_run == 'true' + runs-on: large-linux-arm + timeout-minutes: 60 + strategy: + fail-fast: false + matrix: + pg_version: ['15', '17'] + steps: + - name: Checkout Repo + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + + - name: Install nix + uses: ./.github/actions/nix-install-ephemeral + with: + push-to-cache: 'false' + env: + DEV_AWS_ROLE: ${{ secrets.DEV_AWS_ROLE }} + NIX_SIGN_SECRET_KEY: ${{ secrets.NIX_SIGN_SECRET_KEY }} + + - name: Create Docker context + run: docker context create builders + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0 + with: + endpoint: builders + + - name: Build Docker image + run: | + DOCKERFILE="Dockerfile-${{ matrix.pg_version }}" + echo "Building $DOCKERFILE..." + # Tag with ECR prefix since CLI uses public.ecr.aws/supabase/postgres as base + docker build -f "$DOCKERFILE" -t "public.ecr.aws/supabase/postgres:${{ matrix.pg_version }}" . + + - name: Run CLI smoke test + run: | + echo "Running CLI smoke test for PostgreSQL ${{ matrix.pg_version }}..." + nix run --accept-flake-config .#cli-smoke-test -- --no-build ${{ matrix.pg_version }} + timeout-minutes: 10 + + - name: Show logs on failure + if: failure() + run: | + echo "=== Supabase Status ===" + nix run --accept-flake-config .#supabase-cli -- status || true + + echo "=== Docker containers ===" + docker ps -a + + echo "=== Database container logs ===" + docker logs supabase_db_postgres 2>&1 | tail -100 || true + + - name: Cleanup + if: always() + run: | + nix run --accept-flake-config .#supabase-cli -- stop --no-backup || true + + skip-notification: + name: CLI Smoke Test (Skipped) + needs: check-changes + if: needs.check-changes.outputs.should_run == 'false' + runs-on: ubuntu-latest + steps: + - name: Report skipped + run: | + echo "CLI smoke test skipped - Docker image inputs unchanged" + echo "Input hash: ${{ needs.check-changes.outputs.input_hash }}" diff --git a/.github/workflows/docker-image-test.yml b/.github/workflows/docker-image-test.yml new file mode 100644 index 000000000..1ebd48aee --- /dev/null +++ b/.github/workflows/docker-image-test.yml @@ -0,0 +1,139 @@ +name: Docker Image Test + +on: + pull_request: + types: [opened, reopened, synchronize] + push: + branches: + - develop + - release/* + workflow_call: + secrets: + DEV_AWS_ROLE: + required: true + NIX_SIGN_SECRET_KEY: + required: true + workflow_dispatch: + inputs: + dockerfile: + description: 'Specific Dockerfile to test (leave empty for all)' + required: false + default: '' + type: string + +permissions: + id-token: write + contents: read + +jobs: + check-changes: + name: Check Docker Image Changes + runs-on: blacksmith-2vcpu-ubuntu-2404 + outputs: + should_run: ${{ steps.check.outputs.should_run }} + input_hash: ${{ steps.check.outputs.input_hash }} + steps: + - name: Checkout 
Repo + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + + - name: Install nix + uses: ./.github/actions/nix-install-ephemeral + with: + push-to-cache: 'false' + env: + DEV_AWS_ROLE: ${{ secrets.DEV_AWS_ROLE }} + NIX_SIGN_SECRET_KEY: ${{ secrets.NIX_SIGN_SECRET_KEY }} + + - name: Check Docker image changes + id: check + uses: ./.github/actions/check-docker-image-changes + with: + event_name: ${{ github.event_name }} + base_ref: ${{ github.base_ref }} + + docker-image-test: + name: Test ${{ matrix.dockerfile }} + needs: check-changes + if: needs.check-changes.outputs.should_run == 'true' + runs-on: large-linux-arm + timeout-minutes: 120 + strategy: + fail-fast: false + matrix: + dockerfile: + - Dockerfile-15 + - Dockerfile-17 + - Dockerfile-orioledb-17 + steps: + - name: Checkout Repo + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + + - name: Install nix + uses: ./.github/actions/nix-install-ephemeral + with: + push-to-cache: 'false' + env: + DEV_AWS_ROLE: ${{ secrets.DEV_AWS_ROLE }} + NIX_SIGN_SECRET_KEY: ${{ secrets.NIX_SIGN_SECRET_KEY }} + + - name: Create Docker context + run: docker context create builders + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0 + with: + endpoint: builders + + - name: Build Docker image + run: | + echo "Building ${{ matrix.dockerfile }}..." + VERSION="${{ matrix.dockerfile }}" + VERSION="${VERSION#Dockerfile-}" + # Build with tags expected by both tools + docker build -f ${{ matrix.dockerfile }} \ + -t "pg-docker-test:${VERSION}" \ + -t "supabase-postgres:${VERSION}-analyze" \ + . + + - name: Run image size analysis + run: | + echo "=== Image Size Analysis for ${{ matrix.dockerfile }} ===" + nix run --accept-flake-config .#image-size-analyzer -- --image ${{ matrix.dockerfile }} --no-build + + - name: Run Docker image tests + run: | + echo "=== Running tests for ${{ matrix.dockerfile }} ===" + nix run --accept-flake-config .#docker-image-test -- --no-build ${{ matrix.dockerfile }} + + - name: Show container logs on failure + if: failure() + run: | + VERSION="${{ matrix.dockerfile }}" + VERSION="${VERSION#Dockerfile-}" + CONTAINER_NAME=$(docker ps -a --filter "name=pg-test-${VERSION}" --format "{{.Names}}" | head -1) + if [[ -n "$CONTAINER_NAME" ]]; then + echo "=== Container logs for $CONTAINER_NAME ===" + docker logs "$CONTAINER_NAME" 2>&1 || true + fi + + - name: Cleanup + if: always() + run: | + VERSION="${{ matrix.dockerfile }}" + VERSION="${VERSION#Dockerfile-}" + # Remove test containers + docker ps -a --filter "name=pg-test-${VERSION}" -q | xargs -r docker rm -f || true + # Remove test images + docker rmi "pg-docker-test:${VERSION}" || true + docker rmi "supabase-postgres:${VERSION}-analyze" || true + + skip-notification: + name: Docker Image Test (Skipped) + needs: check-changes + if: needs.check-changes.outputs.should_run == 'false' + runs-on: ubuntu-latest + steps: + - name: Report skipped + run: | + echo "Docker image tests skipped - inputs unchanged" + echo "Input hash: ${{ needs.check-changes.outputs.input_hash }}" diff --git a/.github/workflows/nix-build.yml b/.github/workflows/nix-build.yml index 509893eb0..912ee6d28 100644 --- a/.github/workflows/nix-build.yml +++ b/.github/workflows/nix-build.yml @@ -213,3 +213,15 @@ jobs: (needs.nix-build-packages-x86_64-linux.result == 'skipped' || needs.nix-build-packages-x86_64-linux.result == 'success') && (needs.nix-build-checks-x86_64-linux.result == 'skipped' || 
needs.nix-build-checks-x86_64-linux.result == 'success') uses: ./.github/workflows/test.yml + + docker-image-test: + needs: [nix-eval, nix-build-packages-aarch64-linux, nix-build-checks-aarch64-linux] + if: | + !cancelled() && + needs.nix-eval.result == 'success' && + (needs.nix-build-packages-aarch64-linux.result == 'skipped' || needs.nix-build-packages-aarch64-linux.result == 'success') && + (needs.nix-build-checks-aarch64-linux.result == 'skipped' || needs.nix-build-checks-aarch64-linux.result == 'success') + uses: ./.github/workflows/docker-image-test.yml + secrets: + DEV_AWS_ROLE: ${{ secrets.DEV_AWS_ROLE }} + NIX_SIGN_SECRET_KEY: ${{ secrets.NIX_SIGN_SECRET_KEY }} diff --git a/Dockerfile-15 b/Dockerfile-15 index 7ef96c087..7d4b6c87e 100644 --- a/Dockerfile-15 +++ b/Dockerfile-15 @@ -1,211 +1,132 @@ # syntax=docker/dockerfile:1.6 -ARG postgresql_major=15 -ARG postgresql_release=${postgresql_major}.1 - -# Bump default build arg to build a package from source -# Bump vars.yml to specify runtime package version -ARG sfcgal_release=1.3.10 -ARG postgis_release=3.3.2 -ARG pgrouting_release=3.4.1 -ARG pgtap_release=1.2.0 -ARG pg_cron_release=1.6.2 -ARG pgaudit_release=1.7.0 -ARG pgsql_http_release=1.5.0 -ARG plpgsql_check_release=2.2.5 -ARG pg_safeupdate_release=1.4 -ARG timescaledb_release=2.9.1 -ARG wal2json_release=2_5 -ARG pljava_release=1.6.4 -ARG plv8_release=3.1.5 -ARG pg_plan_filter_release=5081a7b5cb890876e67d8e7486b6a64c38c9a492 -ARG pg_net_release=0.7.1 -ARG rum_release=1.3.13 -ARG pg_hashids_release=cd0e1b31d52b394a0df64079406a14a4f7387cd6 -ARG libsodium_release=1.0.18 -ARG pgsodium_release=3.1.6 -ARG pg_graphql_release=1.5.11 -ARG pg_stat_monitor_release=1.1.1 -ARG pg_jsonschema_release=0.1.4 -ARG pg_repack_release=1.4.8 -ARG vault_release=0.2.8 -ARG groonga_release=12.0.8 -ARG pgroonga_release=2.4.0 -ARG wrappers_release=0.5.7 -ARG hypopg_release=1.3.1 -ARG pgvector_release=0.4.0 -ARG pg_tle_release=1.3.2 -ARG index_advisor_release=0.2.0 -ARG supautils_release=2.2.0 -ARG wal_g_release=2.0.1 - -FROM ubuntu:noble as base - -# Create reusable apt mirror fallback function -RUN echo '#!/bin/bash\n\ -apt_update_with_fallback() {\n\ - local sources_file="/etc/apt/sources.list.d/ubuntu.sources"\n\ - local max_attempts=2\n\ - local attempt=1\n\ - local mirrors="archive.ubuntu.com us.archive.ubuntu.com"\n\ - \n\ - for mirror in $mirrors; do\n\ - echo "========================================="\n\ - echo "Attempting apt-get update with mirror: ${mirror}"\n\ - echo "Attempt ${attempt} of ${max_attempts}"\n\ - echo "========================================="\n\ - \n\ - if [ -f "${sources_file}" ]; then\n\ - sed -i "s|http://[^/]*/ubuntu/|http://${mirror}/ubuntu/|g" "${sources_file}"\n\ - fi\n\ - \n\ - if timeout 300 apt-get update 2>&1; then\n\ - echo "========================================="\n\ - echo "✓ Successfully updated apt cache using mirror: ${mirror}"\n\ - echo "========================================="\n\ - return 0\n\ - else\n\ - local exit_code=$?\n\ - echo "========================================="\n\ - echo "✗ Failed to update using mirror: ${mirror}"\n\ - echo "Exit code: ${exit_code}"\n\ - echo "========================================="\n\ - \n\ - apt-get clean\n\ - rm -rf /var/lib/apt/lists/*\n\ - \n\ - if [ ${attempt} -lt ${max_attempts} ]; then\n\ - local sleep_time=$((attempt * 5))\n\ - echo "Waiting ${sleep_time} seconds before trying next mirror..."\n\ - sleep ${sleep_time}\n\ - fi\n\ - fi\n\ - \n\ - attempt=$((attempt + 1))\n\ - done\n\ - \n\ - echo 
"========================================="\n\ - echo "ERROR: All mirror tiers failed after ${max_attempts} attempts"\n\ - echo "========================================="\n\ - return 1\n\ -}' > /usr/local/bin/apt-update-fallback.sh && chmod +x /usr/local/bin/apt-update-fallback.sh - -RUN bash -c 'source /usr/local/bin/apt-update-fallback.sh && apt_update_with_fallback' && apt install -y \ +# Alpine-based slim PostgreSQL 15 image with Nix extensions + +#################### +# Stage 1: Nix builder +#################### +FROM alpine:3.21 AS nix-builder + +# Install dependencies for nix installer (coreutils for GNU cp, sudo for installer) +RUN apk add --no-cache \ + bash \ + coreutils \ curl \ - gnupg \ - lsb-release \ - software-properties-common \ - wget \ + shadow \ sudo \ - && apt clean + xz +# Create users (Alpine syntax) +RUN addgroup -S postgres && \ + adduser -S -h /var/lib/postgresql -s /bin/bash -G postgres postgres && \ + addgroup -S wal-g && \ + adduser -S -s /bin/bash -G wal-g wal-g -RUN adduser --system --home /var/lib/postgresql --no-create-home --shell /bin/bash --group --gecos "PostgreSQL administrator" postgres -RUN adduser --system --no-create-home --shell /bin/bash --group wal-g +# Create nix config RUN cat < /tmp/extra-nix.conf extra-experimental-features = nix-command flakes extra-substituters = https://nix-postgres-artifacts.s3.amazonaws.com extra-trusted-public-keys = nix-postgres-artifacts:dGZlQOvKcNEjvT7QEAJbcV6b6uk7VF/hWMjhYleiaLI= EOF + +# Install nix RUN curl -L https://releases.nixos.org/nix/nix-2.32.2/install | sh -s -- --daemon --no-channel-add --yes --nix-extra-conf-file /tmp/extra-nix.conf -ENV PATH="${PATH}:/nix/var/nix/profiles/default/bin" -COPY . /nixpg +ENV PATH="${PATH}:/nix/var/nix/profiles/default/bin" WORKDIR /nixpg +COPY . . 
-RUN nix profile add path:.#psql_15/bin +# Build PostgreSQL with extensions +RUN nix profile add path:.#psql_15_slim/bin RUN nix store gc +# Build groonga and copy plugins +RUN nix profile add path:.#supabase-groonga && \ + mkdir -p /tmp/groonga-plugins && \ + cp -r /nix/var/nix/profiles/default/lib/groonga/plugins /tmp/groonga-plugins/ -WORKDIR / - - -RUN mkdir -p /usr/lib/postgresql/bin \ - /usr/lib/postgresql/share/postgresql \ - /usr/share/postgresql \ - /var/lib/postgresql \ - && chown -R postgres:postgres /usr/lib/postgresql \ - && chown -R postgres:postgres /var/lib/postgresql \ - && chown -R postgres:postgres /usr/share/postgresql - -# Create symbolic links -RUN ln -s /nix/var/nix/profiles/default/bin/* /usr/lib/postgresql/bin/ \ - && ln -s /nix/var/nix/profiles/default/bin/* /usr/bin/ \ - && chown -R postgres:postgres /usr/bin - -# Create symbolic links for PostgreSQL shares -RUN ln -s /nix/var/nix/profiles/default/share/postgresql/* /usr/lib/postgresql/share/postgresql/ -RUN ln -s /nix/var/nix/profiles/default/share/postgresql/* /usr/share/postgresql/ -RUN chown -R postgres:postgres /usr/lib/postgresql/share/postgresql/ -RUN chown -R postgres:postgres /usr/share/postgresql/ - -RUN chown -R postgres:postgres /usr/lib/postgresql - -RUN ln -sf /usr/lib/postgresql/share/postgresql/timezonesets /usr/share/postgresql/timezonesets - +RUN nix store gc -RUN bash -c 'source /usr/local/bin/apt-update-fallback.sh && apt_update_with_fallback' && \ - apt-get install -y --no-install-recommends tzdata +#################### +# Stage 2: Gosu builder +#################### +FROM alpine:3.21 AS gosu-builder -RUN ln -fs /usr/share/zoneinfo/Etc/UTC /etc/localtime && \ - dpkg-reconfigure --frontend noninteractive tzdata +ARG TARGETARCH +ARG GOSU_VERSION=1.16 -RUN bash -c 'source /usr/local/bin/apt-update-fallback.sh && apt_update_with_fallback' && \ - apt-get install -y --no-install-recommends \ - build-essential \ - checkinstall \ - cmake +RUN apk add --no-cache gnupg curl -ENV PGDATA=/var/lib/postgresql/data +# Download and verify gosu +RUN curl -fsSL "https://github.com/tianon/gosu/releases/download/${GOSU_VERSION}/gosu-${TARGETARCH}" -o /usr/local/bin/gosu && \ + curl -fsSL "https://github.com/tianon/gosu/releases/download/${GOSU_VERSION}/gosu-${TARGETARCH}.asc" -o /usr/local/bin/gosu.asc && \ + GNUPGHOME="$(mktemp -d)" && \ + export GNUPGHOME && \ + gpg --batch --keyserver hkps://keys.openpgp.org --recv-keys B42F6819007F00F88E364FD4036A9C25BF357DD4 && \ + gpg --batch --verify /usr/local/bin/gosu.asc /usr/local/bin/gosu && \ + rm -rf "$GNUPGHOME" /usr/local/bin/gosu.asc && \ + chmod +x /usr/local/bin/gosu -WORKDIR / #################### -# setup-groonga +# Stage 3: Final production image #################### -FROM base as groonga - -WORKDIR /nixpg +FROM alpine:3.21 AS production -RUN nix profile add path:.#supabase-groonga && \ - mkdir -p /tmp/groonga-plugins && \ - cp -r /nix/var/nix/profiles/default/lib/groonga/plugins /tmp/groonga-plugins/ +# Install minimal runtime dependencies +RUN apk add --no-cache \ + bash \ + curl \ + shadow \ + su-exec \ + tzdata \ + musl-locales \ + musl-locales-lang \ + && rm -rf /var/cache/apk/* + +# Create postgres user/group +RUN addgroup -S postgres && \ + adduser -S -G postgres -h /var/lib/postgresql -s /bin/bash postgres && \ + addgroup -S wal-g && \ + adduser -S -G wal-g -s /bin/bash wal-g && \ + adduser postgres wal-g + +# Copy Nix store and profiles from builder (profile already created by nix profile install) +COPY --from=nix-builder /nix /nix + +# Copy groonga 
plugins +COPY --from=nix-builder /tmp/groonga-plugins/plugins /usr/lib/groonga/plugins + +# Copy gosu +COPY --from=gosu-builder /usr/local/bin/gosu /usr/local/bin/gosu + +# Setup PostgreSQL directories +RUN mkdir -p /usr/lib/postgresql/bin \ + /usr/lib/postgresql/share/postgresql \ + /usr/share/postgresql \ + /var/lib/postgresql/data \ + /var/run/postgresql \ + && chown -R postgres:postgres /usr/lib/postgresql \ + && chown -R postgres:postgres /var/lib/postgresql \ + && chown -R postgres:postgres /usr/share/postgresql \ + && chown -R postgres:postgres /var/run/postgresql -RUN nix store gc +# Create symbolic links for binaries +RUN for f in /nix/var/nix/profiles/default/bin/*; do \ + ln -sf "$f" /usr/lib/postgresql/bin/ 2>/dev/null || true; \ + ln -sf "$f" /usr/bin/ 2>/dev/null || true; \ + done -WORKDIR / -# #################### -# # Download gosu for easy step-down from root -# #################### -FROM base as gosu -ARG TARGETARCH -# Install dependencies -RUN bash -c 'source /usr/local/bin/apt-update-fallback.sh && apt_update_with_fallback' && apt-get install -y --no-install-recommends \ - gnupg \ - ca-certificates \ - && rm -rf /var/lib/apt/lists/* -# Download binary -ARG GOSU_VERSION=1.16 -ARG GOSU_GPG_KEY=B42F6819007F00F88E364FD4036A9C25BF357DD4 -ADD https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$TARGETARCH \ - /usr/local/bin/gosu -ADD https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$TARGETARCH.asc \ - /usr/local/bin/gosu.asc -# Verify checksum -RUN gpg --batch --keyserver hkps://keys.openpgp.org --recv-keys $GOSU_GPG_KEY && \ - gpg --batch --verify /usr/local/bin/gosu.asc /usr/local/bin/gosu && \ - gpgconf --kill all && \ - chmod +x /usr/local/bin/gosu +# Create symbolic links for PostgreSQL shares +RUN ln -sf /nix/var/nix/profiles/default/share/postgresql/* /usr/lib/postgresql/share/postgresql/ 2>/dev/null || true && \ + ln -sf /nix/var/nix/profiles/default/share/postgresql/* /usr/share/postgresql/ 2>/dev/null || true && \ + ln -sf /usr/lib/postgresql/share/postgresql/timezonesets /usr/share/postgresql/timezonesets 2>/dev/null || true -# #################### -# # Build final image -# #################### -FROM gosu as production -RUN id postgres || (echo "postgres user does not exist" && exit 1) -# # Setup extensions -COPY --from=groonga /tmp/groonga-plugins/plugins /usr/lib/groonga/plugins +# Set permissions +RUN chown -R postgres:postgres /usr/lib/postgresql && \ + chown -R postgres:postgres /usr/share/postgresql -# # Initialise configs +# Setup configs COPY --chown=postgres:postgres ansible/files/postgresql_config/postgresql.conf.j2 /etc/postgresql/postgresql.conf COPY --chown=postgres:postgres ansible/files/postgresql_config/pg_hba.conf.j2 /etc/postgresql/pg_hba.conf COPY --chown=postgres:postgres ansible/files/postgresql_config/pg_ident.conf.j2 /etc/postgresql/pg_ident.conf @@ -219,52 +140,47 @@ COPY --chown=postgres:postgres ansible/files/postgresql_config/custom_read_repli COPY --chown=postgres:postgres ansible/files/walg_helper_scripts/wal_fetch.sh /home/postgres/wal_fetch.sh COPY ansible/files/walg_helper_scripts/wal_change_ownership.sh /root/wal_change_ownership.sh +# Configure PostgreSQL settings RUN sed -i \ -e "s|#unix_socket_directories = '/tmp'|unix_socket_directories = '/var/run/postgresql'|g" \ -e "s|#session_preload_libraries = ''|session_preload_libraries = 'supautils'|g" \ -e "s|#include = '/etc/postgresql-custom/supautils.conf'|include = '/etc/postgresql-custom/supautils.conf'|g" \ -e "s|#include = 
'/etc/postgresql-custom/wal-g.conf'|include = '/etc/postgresql-custom/wal-g.conf'|g" /etc/postgresql/postgresql.conf && \ - echo "pgsodium.getkey_script= '/usr/lib/postgresql/bin/pgsodium_getkey.sh'" >> /etc/postgresql/postgresql.conf && \ + echo "pgsodium.getkey_script= '/usr/lib/postgresql/bin/pgsodium_getkey.sh'" >> /etc/postgresql/postgresql.conf && \ echo "vault.getkey_script= '/usr/lib/postgresql/bin/pgsodium_getkey.sh'" >> /etc/postgresql/postgresql.conf && \ - usermod -aG postgres wal-g && \ chown -R postgres:postgres /etc/postgresql-custom -# # Include schema migrations +# Include schema migrations COPY migrations/db /docker-entrypoint-initdb.d/ COPY ansible/files/pgbouncer_config/pgbouncer_auth_schema.sql /docker-entrypoint-initdb.d/init-scripts/00-schema.sql COPY ansible/files/stat_extension.sql /docker-entrypoint-initdb.d/migrations/00-extension.sql -# # Add upstream entrypoint script pinned for now to last tested version -COPY --from=gosu /usr/local/bin/gosu /usr/local/bin/gosu +# Add entrypoint script ADD --chmod=0755 \ https://github.com/docker-library/postgres/raw/889f9447cd2dfe21cccfbe9bb7945e3b037e02d8/15/bullseye/docker-entrypoint.sh \ /usr/local/bin/docker-entrypoint.sh -RUN mkdir -p /var/run/postgresql && chown postgres:postgres /var/run/postgresql - -ENTRYPOINT ["docker-entrypoint.sh"] - -HEALTHCHECK --interval=2s --timeout=2s --retries=10 CMD pg_isready -U postgres -h localhost -STOPSIGNAL SIGINT -EXPOSE 5432 - -ENV POSTGRES_HOST=/var/run/postgresql -ENV POSTGRES_USER=supabase_admin -ENV POSTGRES_DB=postgres -RUN bash -c 'source /usr/local/bin/apt-update-fallback.sh && apt_update_with_fallback' && apt-get install -y --no-install-recommends \ - locales \ - && rm -rf /var/lib/apt/lists/* && \ - localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8 \ - && localedef -i C -c -f UTF-8 -A /usr/share/locale/locale.alias C.UTF-8 -RUN echo "C.UTF-8 UTF-8" > /etc/locale.gen && echo "en_US.UTF-8 UTF-8" >> /etc/locale.gen && locale-gen -ENV LANG en_US.UTF-8 -ENV LANGUAGE en_US:en -ENV LC_ALL en_US.UTF-8 -ENV LOCALE_ARCHIVE /usr/lib/locale/locale-archive +# Setup pgsodium key script RUN mkdir -p /usr/share/postgresql/extension/ && \ ln -s /usr/lib/postgresql/bin/pgsodium_getkey.sh /usr/share/postgresql/extension/pgsodium_getkey && \ chmod +x /usr/lib/postgresql/bin/pgsodium_getkey.sh +# Environment variables +ENV PATH="/nix/var/nix/profiles/default/bin:/usr/lib/postgresql/bin:${PATH}" +ENV PGDATA=/var/lib/postgresql/data +ENV POSTGRES_HOST=/var/run/postgresql +ENV POSTGRES_USER=supabase_admin +ENV POSTGRES_DB=postgres +ENV LANG=en_US.UTF-8 +ENV LANGUAGE=en_US:en +ENV LC_ALL=en_US.UTF-8 ENV GRN_PLUGINS_DIR=/usr/lib/groonga/plugins +# Point to minimal glibc locales included in slim Nix package for initdb locale support +ENV LOCALE_ARCHIVE=/nix/var/nix/profiles/default/lib/locale/locale-archive + +ENTRYPOINT ["docker-entrypoint.sh"] +HEALTHCHECK --interval=2s --timeout=2s --retries=10 CMD pg_isready -U postgres -h localhost +STOPSIGNAL SIGINT +EXPOSE 5432 CMD ["postgres", "-D", "/etc/postgresql"] diff --git a/Dockerfile-17 b/Dockerfile-17 index 1334f0744..3eb983bba 100644 --- a/Dockerfile-17 +++ b/Dockerfile-17 @@ -1,215 +1,132 @@ # syntax=docker/dockerfile:1.6 -ARG postgresql_major=17-orioledb -ARG postgresql_release=${postgresql_major}.1 - -# Bump default build arg to build a package from source -# Bump vars.yml to specify runtime package version -ARG sfcgal_release=1.3.10 -ARG postgis_release=3.3.2 -ARG pgrouting_release=3.4.1 -ARG pgtap_release=1.2.0 -ARG 
pg_cron_release=1.6.2 -ARG pgaudit_release=1.7.0 -ARG pgjwt_release=9742dab1b2f297ad3811120db7b21451bca2d3c9 -ARG pgsql_http_release=1.5.0 -ARG plpgsql_check_release=2.2.5 -ARG pg_safeupdate_release=1.4 -ARG timescaledb_release=2.9.1 -ARG wal2json_release=2_5 -ARG pljava_release=1.6.4 -ARG plv8_release=3.1.5 -ARG pg_plan_filter_release=5081a7b5cb890876e67d8e7486b6a64c38c9a492 -ARG pg_net_release=0.7.1 -ARG rum_release=1.3.13 -ARG pg_hashids_release=cd0e1b31d52b394a0df64079406a14a4f7387cd6 -ARG libsodium_release=1.0.18 -ARG pgsodium_release=3.1.6 -ARG pg_graphql_release=1.5.11 -ARG pg_stat_monitor_release=1.1.1 -ARG pg_jsonschema_release=0.1.4 -ARG pg_repack_release=1.4.8 -ARG vault_release=0.2.8 -ARG groonga_release=12.0.8 -ARG pgroonga_release=2.4.0 -ARG wrappers_release=0.5.7 -ARG hypopg_release=1.3.1 -ARG pgvector_release=0.4.0 -ARG pg_tle_release=1.3.2 -ARG index_advisor_release=0.2.0 -ARG supautils_release=2.2.0 -ARG wal_g_release=3.0.5 - -FROM ubuntu:noble as base - -# Create reusable apt mirror fallback function -RUN echo '#!/bin/bash\n\ -apt_update_with_fallback() {\n\ - local sources_file="/etc/apt/sources.list.d/ubuntu.sources"\n\ - local max_attempts=2\n\ - local attempt=1\n\ - local mirrors="archive.ubuntu.com us.archive.ubuntu.com"\n\ - \n\ - for mirror in $mirrors; do\n\ - echo "========================================="\n\ - echo "Attempting apt-get update with mirror: ${mirror}"\n\ - echo "Attempt ${attempt} of ${max_attempts}"\n\ - echo "========================================="\n\ - \n\ - if [ -f "${sources_file}" ]; then\n\ - sed -i "s|http://[^/]*/ubuntu/|http://${mirror}/ubuntu/|g" "${sources_file}"\n\ - fi\n\ - \n\ - if timeout 300 apt-get update 2>&1; then\n\ - echo "========================================="\n\ - echo "✓ Successfully updated apt cache using mirror: ${mirror}"\n\ - echo "========================================="\n\ - return 0\n\ - else\n\ - local exit_code=$?\n\ - echo "========================================="\n\ - echo "✗ Failed to update using mirror: ${mirror}"\n\ - echo "Exit code: ${exit_code}"\n\ - echo "========================================="\n\ - \n\ - apt-get clean\n\ - rm -rf /var/lib/apt/lists/*\n\ - \n\ - if [ ${attempt} -lt ${max_attempts} ]; then\n\ - local sleep_time=$((attempt * 5))\n\ - echo "Waiting ${sleep_time} seconds before trying next mirror..."\n\ - sleep ${sleep_time}\n\ - fi\n\ - fi\n\ - \n\ - attempt=$((attempt + 1))\n\ - done\n\ - \n\ - echo "========================================="\n\ - echo "ERROR: All mirror tiers failed after ${max_attempts} attempts"\n\ - echo "========================================="\n\ - return 1\n\ -}' > /usr/local/bin/apt-update-fallback.sh && chmod +x /usr/local/bin/apt-update-fallback.sh - -RUN bash -c 'source /usr/local/bin/apt-update-fallback.sh && apt_update_with_fallback' && apt install -y \ +# Alpine-based slim PostgreSQL 17 image with Nix extensions + +#################### +# Stage 1: Nix builder +#################### +FROM alpine:3.21 AS nix-builder + +# Install dependencies for nix installer (coreutils for GNU cp, sudo for installer) +RUN apk add --no-cache \ + bash \ + coreutils \ curl \ - gnupg \ - lsb-release \ - software-properties-common \ - wget \ + shadow \ sudo \ - tree \ - && apt clean + xz +# Create users (Alpine syntax) +RUN addgroup -S postgres && \ + adduser -S -h /var/lib/postgresql -s /bin/bash -G postgres postgres && \ + addgroup -S wal-g && \ + adduser -S -s /bin/bash -G wal-g wal-g -RUN adduser --system --home /var/lib/postgresql --no-create-home --shell 
/bin/bash --group --gecos "PostgreSQL administrator" postgres -RUN adduser --system --no-create-home --shell /bin/bash --group wal-g +# Create nix config RUN cat < /tmp/extra-nix.conf extra-experimental-features = nix-command flakes extra-substituters = https://nix-postgres-artifacts.s3.amazonaws.com extra-trusted-public-keys = nix-postgres-artifacts:dGZlQOvKcNEjvT7QEAJbcV6b6uk7VF/hWMjhYleiaLI= EOF + +# Install nix RUN curl -L https://releases.nixos.org/nix/nix-2.32.2/install | sh -s -- --daemon --no-channel-add --yes --nix-extra-conf-file /tmp/extra-nix.conf ENV PATH="${PATH}:/nix/var/nix/profiles/default/bin" -COPY . /nixpg - WORKDIR /nixpg +COPY . . -RUN nix profile add path:.#psql_17/bin +# Build PostgreSQL with extensions +RUN nix profile add path:.#psql_17_slim/bin RUN nix store gc -WORKDIR / - - -RUN mkdir -p /usr/lib/postgresql/bin \ - /usr/lib/postgresql/share/postgresql \ - /usr/share/postgresql \ - /var/lib/postgresql \ - && chown -R postgres:postgres /usr/lib/postgresql \ - && chown -R postgres:postgres /var/lib/postgresql \ - && chown -R postgres:postgres /usr/share/postgresql - -# Create symbolic links -RUN ln -s /nix/var/nix/profiles/default/bin/* /usr/lib/postgresql/bin/ \ - && ln -s /nix/var/nix/profiles/default/bin/* /usr/bin/ \ - && chown -R postgres:postgres /usr/bin - -# Create symbolic links for PostgreSQL shares -RUN ln -s /nix/var/nix/profiles/default/share/postgresql/* /usr/lib/postgresql/share/postgresql/ -RUN ln -s /nix/var/nix/profiles/default/share/postgresql/* /usr/share/postgresql/ -RUN chown -R postgres:postgres /usr/lib/postgresql/share/postgresql/ -RUN chown -R postgres:postgres /usr/share/postgresql/ - -RUN tree /nix > /tmp/tree.txt && cat /tmp/tree.txt && cat /tmp/tree.txt >&2 - -RUN chown -R postgres:postgres /usr/lib/postgresql - -RUN ln -sf /usr/lib/postgresql/share/postgresql/timezonesets /usr/share/postgresql/timezonesets +# Build groonga and copy plugins +RUN nix profile add path:.#supabase-groonga && \ + mkdir -p /tmp/groonga-plugins && \ + cp -r /nix/var/nix/profiles/default/lib/groonga/plugins /tmp/groonga-plugins/ +RUN nix store gc -RUN bash -c 'source /usr/local/bin/apt-update-fallback.sh && apt_update_with_fallback' && \ - apt-get install -y --no-install-recommends tzdata +#################### +# Stage 2: Gosu builder +#################### +FROM alpine:3.21 AS gosu-builder -RUN ln -fs /usr/share/zoneinfo/Etc/UTC /etc/localtime && \ - dpkg-reconfigure --frontend noninteractive tzdata +ARG TARGETARCH +ARG GOSU_VERSION=1.16 -RUN bash -c 'source /usr/local/bin/apt-update-fallback.sh && apt_update_with_fallback' && \ - apt-get install -y --no-install-recommends \ - build-essential \ - checkinstall \ - cmake +RUN apk add --no-cache gnupg curl -ENV PGDATA=/var/lib/postgresql/data +# Download and verify gosu +RUN curl -fsSL "https://github.com/tianon/gosu/releases/download/${GOSU_VERSION}/gosu-${TARGETARCH}" -o /usr/local/bin/gosu && \ + curl -fsSL "https://github.com/tianon/gosu/releases/download/${GOSU_VERSION}/gosu-${TARGETARCH}.asc" -o /usr/local/bin/gosu.asc && \ + GNUPGHOME="$(mktemp -d)" && \ + export GNUPGHOME && \ + gpg --batch --keyserver hkps://keys.openpgp.org --recv-keys B42F6819007F00F88E364FD4036A9C25BF357DD4 && \ + gpg --batch --verify /usr/local/bin/gosu.asc /usr/local/bin/gosu && \ + rm -rf "$GNUPGHOME" /usr/local/bin/gosu.asc && \ + chmod +x /usr/local/bin/gosu -WORKDIR / #################### -# setup-groonga +# Stage 3: Final production image #################### -FROM base as groonga +FROM alpine:3.21 AS production -WORKDIR 
/nixpg - -RUN nix profile add path:.#supabase-groonga && \ - mkdir -p /tmp/groonga-plugins && \ - cp -r /nix/var/nix/profiles/default/lib/groonga/plugins /tmp/groonga-plugins/ +# Install minimal runtime dependencies +RUN apk add --no-cache \ + bash \ + curl \ + shadow \ + su-exec \ + tzdata \ + musl-locales \ + musl-locales-lang \ + && rm -rf /var/cache/apk/* + +# Create postgres user/group +RUN addgroup -S postgres && \ + adduser -S -G postgres -h /var/lib/postgresql -s /bin/bash postgres && \ + addgroup -S wal-g && \ + adduser -S -G wal-g -s /bin/bash wal-g && \ + adduser postgres wal-g + +# Copy Nix store and profiles from builder (profile already created by nix profile install) +COPY --from=nix-builder /nix /nix + +# Copy groonga plugins +COPY --from=nix-builder /tmp/groonga-plugins/plugins /usr/lib/groonga/plugins + +# Copy gosu +COPY --from=gosu-builder /usr/local/bin/gosu /usr/local/bin/gosu + +# Setup PostgreSQL directories +RUN mkdir -p /usr/lib/postgresql/bin \ + /usr/lib/postgresql/share/postgresql \ + /usr/share/postgresql \ + /var/lib/postgresql/data \ + /var/run/postgresql \ + && chown -R postgres:postgres /usr/lib/postgresql \ + && chown -R postgres:postgres /var/lib/postgresql \ + && chown -R postgres:postgres /usr/share/postgresql \ + && chown -R postgres:postgres /var/run/postgresql -RUN nix store gc +# Create symbolic links for binaries +RUN for f in /nix/var/nix/profiles/default/bin/*; do \ + ln -sf "$f" /usr/lib/postgresql/bin/ 2>/dev/null || true; \ + ln -sf "$f" /usr/bin/ 2>/dev/null || true; \ + done -WORKDIR / -# #################### -# # Download gosu for easy step-down from root -# #################### -FROM base as gosu -ARG TARGETARCH -# Install dependencies -RUN bash -c 'source /usr/local/bin/apt-update-fallback.sh && apt_update_with_fallback' && apt-get install -y --no-install-recommends \ - gnupg \ - ca-certificates \ - && rm -rf /var/lib/apt/lists/* -# Download binary -ARG GOSU_VERSION=1.16 -ARG GOSU_GPG_KEY=B42F6819007F00F88E364FD4036A9C25BF357DD4 -ADD https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$TARGETARCH \ - /usr/local/bin/gosu -ADD https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$TARGETARCH.asc \ - /usr/local/bin/gosu.asc -# Verify checksum -RUN gpg --batch --keyserver hkps://keys.openpgp.org --recv-keys $GOSU_GPG_KEY && \ - gpg --batch --verify /usr/local/bin/gosu.asc /usr/local/bin/gosu && \ - gpgconf --kill all && \ - chmod +x /usr/local/bin/gosu +# Create symbolic links for PostgreSQL shares +RUN ln -sf /nix/var/nix/profiles/default/share/postgresql/* /usr/lib/postgresql/share/postgresql/ 2>/dev/null || true && \ + ln -sf /nix/var/nix/profiles/default/share/postgresql/* /usr/share/postgresql/ 2>/dev/null || true && \ + ln -sf /usr/lib/postgresql/share/postgresql/timezonesets /usr/share/postgresql/timezonesets 2>/dev/null || true -# #################### -# # Build final image -# #################### -FROM gosu as production -RUN id postgres || (echo "postgres user does not exist" && exit 1) -# # Setup extensions -COPY --from=groonga /tmp/groonga-plugins/plugins /usr/lib/groonga/plugins +# Set permissions +RUN chown -R postgres:postgres /usr/lib/postgresql && \ + chown -R postgres:postgres /usr/share/postgresql -# # Initialise configs +# Setup configs COPY --chown=postgres:postgres ansible/files/postgresql_config/postgresql.conf.j2 /etc/postgresql/postgresql.conf COPY --chown=postgres:postgres ansible/files/postgresql_config/pg_hba.conf.j2 /etc/postgresql/pg_hba.conf COPY --chown=postgres:postgres 
ansible/files/postgresql_config/pg_ident.conf.j2 /etc/postgresql/pg_ident.conf @@ -223,6 +140,7 @@ COPY --chown=postgres:postgres ansible/files/postgresql_config/custom_read_repli COPY --chown=postgres:postgres ansible/files/walg_helper_scripts/wal_fetch.sh /home/postgres/wal_fetch.sh COPY ansible/files/walg_helper_scripts/wal_change_ownership.sh /root/wal_change_ownership.sh +# Configure PostgreSQL settings RUN sed -i \ -e "s|#unix_socket_directories = '/tmp'|unix_socket_directories = '/var/run/postgresql'|g" \ -e "s|#session_preload_libraries = ''|session_preload_libraries = 'supautils'|g" \ @@ -230,54 +148,45 @@ RUN sed -i \ -e "s|#include = '/etc/postgresql-custom/wal-g.conf'|include = '/etc/postgresql-custom/wal-g.conf'|g" /etc/postgresql/postgresql.conf && \ echo "pgsodium.getkey_script= '/usr/lib/postgresql/bin/pgsodium_getkey.sh'" >> /etc/postgresql/postgresql.conf && \ echo "vault.getkey_script= '/usr/lib/postgresql/bin/pgsodium_getkey.sh'" >> /etc/postgresql/postgresql.conf && \ - usermod -aG postgres wal-g && \ chown -R postgres:postgres /etc/postgresql-custom - # Remove items from postgresql.conf -RUN sed -i 's/ timescaledb,//g;' "/etc/postgresql/postgresql.conf" - #as of pg 16.4 + this db_user_namespace totally deprecated and will break the server if setting is present -RUN sed -i 's/db_user_namespace = off/#db_user_namespace = off/g;' "/etc/postgresql/postgresql.conf" -RUN sed -i 's/ timescaledb,//g; s/ plv8,//g' "/etc/postgresql-custom/supautils.conf" +# Remove timescaledb and plv8 references (not in pg17) +RUN sed -i 's/ timescaledb,//g;' "/etc/postgresql/postgresql.conf" && \ + sed -i 's/db_user_namespace = off/#db_user_namespace = off/g;' "/etc/postgresql/postgresql.conf" && \ + sed -i 's/ timescaledb,//g; s/ plv8,//g' "/etc/postgresql-custom/supautils.conf" - - -# # Include schema migrations +# Include schema migrations COPY migrations/db /docker-entrypoint-initdb.d/ COPY ansible/files/pgbouncer_config/pgbouncer_auth_schema.sql /docker-entrypoint-initdb.d/init-scripts/00-schema.sql COPY ansible/files/stat_extension.sql /docker-entrypoint-initdb.d/migrations/00-extension.sql -# # Add upstream entrypoint script pinned for now to last tested version -COPY --from=gosu /usr/local/bin/gosu /usr/local/bin/gosu +# Add entrypoint script ADD --chmod=0755 \ https://github.com/docker-library/postgres/raw/889f9447cd2dfe21cccfbe9bb7945e3b037e02d8/17/bullseye/docker-entrypoint.sh \ /usr/local/bin/docker-entrypoint.sh -RUN mkdir -p /var/run/postgresql && chown postgres:postgres /var/run/postgresql - -ENTRYPOINT ["docker-entrypoint.sh"] - -HEALTHCHECK --interval=2s --timeout=2s --retries=10 CMD pg_isready -U postgres -h localhost -STOPSIGNAL SIGINT -EXPOSE 5432 +# Setup pgsodium key script +RUN mkdir -p /usr/share/postgresql/extension/ && \ + ln -s /usr/lib/postgresql/bin/pgsodium_getkey.sh /usr/share/postgresql/extension/pgsodium_getkey && \ + chmod +x /usr/lib/postgresql/bin/pgsodium_getkey.sh +# Environment variables +ENV PATH="/nix/var/nix/profiles/default/bin:/usr/lib/postgresql/bin:${PATH}" +ENV PGDATA=/var/lib/postgresql/data ENV POSTGRES_HOST=/var/run/postgresql ENV POSTGRES_USER=supabase_admin ENV POSTGRES_DB=postgres ENV POSTGRES_INITDB_ARGS="--allow-group-access --locale-provider=icu --encoding=UTF-8 --icu-locale=en_US.UTF-8" -RUN bash -c 'source /usr/local/bin/apt-update-fallback.sh && apt_update_with_fallback' && apt-get install -y --no-install-recommends \ - locales \ - && rm -rf /var/lib/apt/lists/* && \ - localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias 
en_US.UTF-8 \ - && localedef -i C -c -f UTF-8 -A /usr/share/locale/locale.alias C.UTF-8 -RUN echo "C.UTF-8 UTF-8" > /etc/locale.gen && echo "en_US.UTF-8 UTF-8" >> /etc/locale.gen && locale-gen -ENV LANG en_US.UTF-8 -ENV LANGUAGE en_US:en -ENV LC_ALL en_US.UTF-8 -ENV LOCALE_ARCHIVE /usr/lib/locale/locale-archive -RUN mkdir -p /usr/share/postgresql/extension/ && \ - ln -s /usr/lib/postgresql/bin/pgsodium_getkey.sh /usr/share/postgresql/extension/pgsodium_getkey && \ - chmod +x /usr/lib/postgresql/bin/pgsodium_getkey.sh - +ENV LANG=en_US.UTF-8 +ENV LANGUAGE=en_US:en +ENV LC_ALL=en_US.UTF-8 ENV GRN_PLUGINS_DIR=/usr/lib/groonga/plugins +# Point to minimal glibc locales included in slim Nix package for initdb locale support +ENV LOCALE_ARCHIVE=/nix/var/nix/profiles/default/lib/locale/locale-archive + +ENTRYPOINT ["docker-entrypoint.sh"] +HEALTHCHECK --interval=2s --timeout=2s --retries=10 CMD pg_isready -U postgres -h localhost +STOPSIGNAL SIGINT +EXPOSE 5432 CMD ["postgres", "-D", "/etc/postgresql"] diff --git a/Dockerfile-orioledb-17 b/Dockerfile-orioledb-17 index 78072e03c..1a72b2a36 100644 --- a/Dockerfile-orioledb-17 +++ b/Dockerfile-orioledb-17 @@ -1,215 +1,132 @@ # syntax=docker/dockerfile:1.6 -ARG postgresql_major=17-orioledb -ARG postgresql_release=${postgresql_major}.1 - -# Bump default build arg to build a package from source -# Bump vars.yml to specify runtime package version -ARG sfcgal_release=1.3.10 -ARG postgis_release=3.3.2 -ARG pgrouting_release=3.4.1 -ARG pgtap_release=1.2.0 -ARG pg_cron_release=1.6.2 -ARG pgaudit_release=1.7.0 -ARG pgjwt_release=9742dab1b2f297ad3811120db7b21451bca2d3c9 -ARG pgsql_http_release=1.5.0 -ARG plpgsql_check_release=2.2.5 -ARG pg_safeupdate_release=1.4 -ARG timescaledb_release=2.9.1 -ARG wal2json_release=2_5 -ARG pljava_release=1.6.4 -ARG plv8_release=3.1.5 -ARG pg_plan_filter_release=5081a7b5cb890876e67d8e7486b6a64c38c9a492 -ARG pg_net_release=0.7.1 -ARG rum_release=1.3.13 -ARG pg_hashids_release=cd0e1b31d52b394a0df64079406a14a4f7387cd6 -ARG libsodium_release=1.0.18 -ARG pgsodium_release=3.1.6 -ARG pg_graphql_release=1.5.11 -ARG pg_stat_monitor_release=1.1.1 -ARG pg_jsonschema_release=0.1.4 -ARG pg_repack_release=1.4.8 -ARG vault_release=0.2.8 -ARG groonga_release=12.0.8 -ARG pgroonga_release=2.4.0 -ARG wrappers_release=0.5.7 -ARG hypopg_release=1.3.1 -ARG pgvector_release=0.4.0 -ARG pg_tle_release=1.3.2 -ARG index_advisor_release=0.2.0 -ARG supautils_release=2.2.0 -ARG wal_g_release=3.0.5 - -FROM ubuntu:noble as base - -# Create reusable apt mirror fallback function -RUN echo '#!/bin/bash\n\ -apt_update_with_fallback() {\n\ - local sources_file="/etc/apt/sources.list.d/ubuntu.sources"\n\ - local max_attempts=2\n\ - local attempt=1\n\ - local mirrors="archive.ubuntu.com us.archive.ubuntu.com"\n\ - \n\ - for mirror in $mirrors; do\n\ - echo "========================================="\n\ - echo "Attempting apt-get update with mirror: ${mirror}"\n\ - echo "Attempt ${attempt} of ${max_attempts}"\n\ - echo "========================================="\n\ - \n\ - if [ -f "${sources_file}" ]; then\n\ - sed -i "s|http://[^/]*/ubuntu/|http://${mirror}/ubuntu/|g" "${sources_file}"\n\ - fi\n\ - \n\ - if timeout 300 apt-get update 2>&1; then\n\ - echo "========================================="\n\ - echo "✓ Successfully updated apt cache using mirror: ${mirror}"\n\ - echo "========================================="\n\ - return 0\n\ - else\n\ - local exit_code=$?\n\ - echo "========================================="\n\ - echo "✗ Failed to update using mirror: 
${mirror}"\n\ - echo "Exit code: ${exit_code}"\n\ - echo "========================================="\n\ - \n\ - apt-get clean\n\ - rm -rf /var/lib/apt/lists/*\n\ - \n\ - if [ ${attempt} -lt ${max_attempts} ]; then\n\ - local sleep_time=$((attempt * 5))\n\ - echo "Waiting ${sleep_time} seconds before trying next mirror..."\n\ - sleep ${sleep_time}\n\ - fi\n\ - fi\n\ - \n\ - attempt=$((attempt + 1))\n\ - done\n\ - \n\ - echo "========================================="\n\ - echo "ERROR: All mirror tiers failed after ${max_attempts} attempts"\n\ - echo "========================================="\n\ - return 1\n\ -}' > /usr/local/bin/apt-update-fallback.sh && chmod +x /usr/local/bin/apt-update-fallback.sh - -RUN bash -c 'source /usr/local/bin/apt-update-fallback.sh && apt_update_with_fallback' && apt install -y \ +# Alpine-based slim PostgreSQL 17 with OrioleDB image using Nix extensions + +#################### +# Stage 1: Nix builder +#################### +FROM alpine:3.21 AS nix-builder + +# Install dependencies for nix installer (coreutils for GNU cp, sudo for installer) +RUN apk add --no-cache \ + bash \ + coreutils \ curl \ - gnupg \ - lsb-release \ - software-properties-common \ - wget \ + shadow \ sudo \ - tree \ - && apt clean + xz +# Create users (Alpine syntax) +RUN addgroup -S postgres && \ + adduser -S -h /var/lib/postgresql -s /bin/bash -G postgres postgres && \ + addgroup -S wal-g && \ + adduser -S -s /bin/bash -G wal-g wal-g -RUN adduser --system --home /var/lib/postgresql --no-create-home --shell /bin/bash --group --gecos "PostgreSQL administrator" postgres -RUN adduser --system --no-create-home --shell /bin/bash --group wal-g +# Create nix config RUN cat < /tmp/extra-nix.conf extra-experimental-features = nix-command flakes extra-substituters = https://nix-postgres-artifacts.s3.amazonaws.com extra-trusted-public-keys = nix-postgres-artifacts:dGZlQOvKcNEjvT7QEAJbcV6b6uk7VF/hWMjhYleiaLI= EOF + +# Install nix RUN curl -L https://releases.nixos.org/nix/nix-2.32.2/install | sh -s -- --daemon --no-channel-add --yes --nix-extra-conf-file /tmp/extra-nix.conf ENV PATH="${PATH}:/nix/var/nix/profiles/default/bin" -COPY . /nixpg - WORKDIR /nixpg +COPY . . 
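+
+# NOTE: `psql_orioledb-17_slim` added below is the latest-extensions-only
+# ("slim") output; it can be dry-run locally before an image build
+# (hypothetical invocation): nix build 'path:.#psql_orioledb-17_slim/bin' --dry-run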
-RUN nix profile add path:.#psql_orioledb-17/bin +# Build PostgreSQL with extensions +RUN nix profile add path:.#psql_orioledb-17_slim/bin RUN nix store gc -WORKDIR / - - -RUN mkdir -p /usr/lib/postgresql/bin \ - /usr/lib/postgresql/share/postgresql \ - /usr/share/postgresql \ - /var/lib/postgresql \ - && chown -R postgres:postgres /usr/lib/postgresql \ - && chown -R postgres:postgres /var/lib/postgresql \ - && chown -R postgres:postgres /usr/share/postgresql - -# Create symbolic links -RUN ln -s /nix/var/nix/profiles/default/bin/* /usr/lib/postgresql/bin/ \ - && ln -s /nix/var/nix/profiles/default/bin/* /usr/bin/ \ - && chown -R postgres:postgres /usr/bin - -# Create symbolic links for PostgreSQL shares -RUN ln -s /nix/var/nix/profiles/default/share/postgresql/* /usr/lib/postgresql/share/postgresql/ -RUN ln -s /nix/var/nix/profiles/default/share/postgresql/* /usr/share/postgresql/ -RUN chown -R postgres:postgres /usr/lib/postgresql/share/postgresql/ -RUN chown -R postgres:postgres /usr/share/postgresql/ - -RUN tree /nix > /tmp/tree.txt && cat /tmp/tree.txt && cat /tmp/tree.txt >&2 - -RUN chown -R postgres:postgres /usr/lib/postgresql - -RUN ln -sf /usr/lib/postgresql/share/postgresql/timezonesets /usr/share/postgresql/timezonesets +# Build groonga and copy plugins +RUN nix profile add path:.#supabase-groonga && \ + mkdir -p /tmp/groonga-plugins && \ + cp -r /nix/var/nix/profiles/default/lib/groonga/plugins /tmp/groonga-plugins/ +RUN nix store gc -RUN bash -c 'source /usr/local/bin/apt-update-fallback.sh && apt_update_with_fallback' && \ - apt-get install -y --no-install-recommends tzdata +#################### +# Stage 2: Gosu builder +#################### +FROM alpine:3.21 AS gosu-builder -RUN ln -fs /usr/share/zoneinfo/Etc/UTC /etc/localtime && \ - dpkg-reconfigure --frontend noninteractive tzdata +ARG TARGETARCH +ARG GOSU_VERSION=1.16 -RUN bash -c 'source /usr/local/bin/apt-update-fallback.sh && apt_update_with_fallback' && \ - apt-get install -y --no-install-recommends \ - build-essential \ - checkinstall \ - cmake +RUN apk add --no-cache gnupg curl -ENV PGDATA=/var/lib/postgresql/data +# Download and verify gosu +RUN curl -fsSL "https://github.com/tianon/gosu/releases/download/${GOSU_VERSION}/gosu-${TARGETARCH}" -o /usr/local/bin/gosu && \ + curl -fsSL "https://github.com/tianon/gosu/releases/download/${GOSU_VERSION}/gosu-${TARGETARCH}.asc" -o /usr/local/bin/gosu.asc && \ + GNUPGHOME="$(mktemp -d)" && \ + export GNUPGHOME && \ + gpg --batch --keyserver hkps://keys.openpgp.org --recv-keys B42F6819007F00F88E364FD4036A9C25BF357DD4 && \ + gpg --batch --verify /usr/local/bin/gosu.asc /usr/local/bin/gosu && \ + rm -rf "$GNUPGHOME" /usr/local/bin/gosu.asc && \ + chmod +x /usr/local/bin/gosu -WORKDIR / #################### -# setup-groonga +# Stage 3: Final production image #################### -FROM base as groonga - -WORKDIR /nixpg +FROM alpine:3.21 AS production -RUN nix profile add path:.#supabase-groonga && \ - mkdir -p /tmp/groonga-plugins && \ - cp -r /nix/var/nix/profiles/default/lib/groonga/plugins /tmp/groonga-plugins/ +# Install minimal runtime dependencies +RUN apk add --no-cache \ + bash \ + curl \ + shadow \ + su-exec \ + tzdata \ + musl-locales \ + musl-locales-lang \ + && rm -rf /var/cache/apk/* + +# Create postgres user/group +RUN addgroup -S postgres && \ + adduser -S -G postgres -h /var/lib/postgresql -s /bin/bash postgres && \ + addgroup -S wal-g && \ + adduser -S -G wal-g -s /bin/bash wal-g && \ + adduser postgres wal-g + +# Copy Nix store and profiles from builder 
(profile already created by nix profile install) +COPY --from=nix-builder /nix /nix + +# Copy groonga plugins +COPY --from=nix-builder /tmp/groonga-plugins/plugins /usr/lib/groonga/plugins + +# Copy gosu +COPY --from=gosu-builder /usr/local/bin/gosu /usr/local/bin/gosu + +# Setup PostgreSQL directories +RUN mkdir -p /usr/lib/postgresql/bin \ + /usr/lib/postgresql/share/postgresql \ + /usr/share/postgresql \ + /var/lib/postgresql/data \ + /var/run/postgresql \ + && chown -R postgres:postgres /usr/lib/postgresql \ + && chown -R postgres:postgres /var/lib/postgresql \ + && chown -R postgres:postgres /usr/share/postgresql \ + && chown -R postgres:postgres /var/run/postgresql -RUN nix store gc +# Create symbolic links for binaries +RUN for f in /nix/var/nix/profiles/default/bin/*; do \ + ln -sf "$f" /usr/lib/postgresql/bin/ 2>/dev/null || true; \ + ln -sf "$f" /usr/bin/ 2>/dev/null || true; \ + done -WORKDIR / -# #################### -# # Download gosu for easy step-down from root -# #################### -FROM base as gosu -ARG TARGETARCH -# Install dependencies -RUN bash -c 'source /usr/local/bin/apt-update-fallback.sh && apt_update_with_fallback' && apt-get install -y --no-install-recommends \ - gnupg \ - ca-certificates \ - && rm -rf /var/lib/apt/lists/* -# Download binary -ARG GOSU_VERSION=1.16 -ARG GOSU_GPG_KEY=B42F6819007F00F88E364FD4036A9C25BF357DD4 -ADD https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$TARGETARCH \ - /usr/local/bin/gosu -ADD https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$TARGETARCH.asc \ - /usr/local/bin/gosu.asc -# Verify checksum -RUN gpg --batch --keyserver hkps://keys.openpgp.org --recv-keys $GOSU_GPG_KEY && \ - gpg --batch --verify /usr/local/bin/gosu.asc /usr/local/bin/gosu && \ - gpgconf --kill all && \ - chmod +x /usr/local/bin/gosu +# Create symbolic links for PostgreSQL shares +RUN ln -sf /nix/var/nix/profiles/default/share/postgresql/* /usr/lib/postgresql/share/postgresql/ 2>/dev/null || true && \ + ln -sf /nix/var/nix/profiles/default/share/postgresql/* /usr/share/postgresql/ 2>/dev/null || true && \ + ln -sf /usr/lib/postgresql/share/postgresql/timezonesets /usr/share/postgresql/timezonesets 2>/dev/null || true -# #################### -# # Build final image -# #################### -FROM gosu as production -RUN id postgres || (echo "postgres user does not exist" && exit 1) -# # Setup extensions -COPY --from=groonga /tmp/groonga-plugins/plugins /usr/lib/groonga/plugins +# Set permissions +RUN chown -R postgres:postgres /usr/lib/postgresql && \ + chown -R postgres:postgres /usr/share/postgresql -# # Initialise configs +# Setup configs COPY --chown=postgres:postgres ansible/files/postgresql_config/postgresql.conf.j2 /etc/postgresql/postgresql.conf COPY --chown=postgres:postgres ansible/files/postgresql_config/pg_hba.conf.j2 /etc/postgresql/pg_hba.conf COPY --chown=postgres:postgres ansible/files/postgresql_config/pg_ident.conf.j2 /etc/postgresql/pg_ident.conf @@ -223,6 +140,7 @@ COPY --chown=postgres:postgres ansible/files/postgresql_config/custom_read_repli COPY --chown=postgres:postgres ansible/files/walg_helper_scripts/wal_fetch.sh /home/postgres/wal_fetch.sh COPY ansible/files/walg_helper_scripts/wal_change_ownership.sh /root/wal_change_ownership.sh +# Configure PostgreSQL settings RUN sed -i \ -e "s|#unix_socket_directories = '/tmp'|unix_socket_directories = '/var/run/postgresql'|g" \ -e "s|#session_preload_libraries = ''|session_preload_libraries = 'supautils'|g" \ @@ -230,16 +148,16 @@ RUN sed -i \ -e "s|#include 
= '/etc/postgresql-custom/wal-g.conf'|include = '/etc/postgresql-custom/wal-g.conf'|g" /etc/postgresql/postgresql.conf && \ echo "pgsodium.getkey_script= '/usr/lib/postgresql/bin/pgsodium_getkey.sh'" >> /etc/postgresql/postgresql.conf && \ echo "vault.getkey_script= '/usr/lib/postgresql/bin/pgsodium_getkey.sh'" >> /etc/postgresql/postgresql.conf && \ - usermod -aG postgres wal-g && \ chown -R postgres:postgres /etc/postgresql-custom - # Remove items from postgresql.conf -RUN sed -i 's/ timescaledb,//g;' "/etc/postgresql/postgresql.conf" - #as of pg 16.4 + this db_user_namespace totally deprecated and will break the server if setting is present -RUN sed -i 's/db_user_namespace = off/#db_user_namespace = off/g;' "/etc/postgresql/postgresql.conf" -RUN sed -i 's/ timescaledb,//g; s/ plv8,//g; s/ postgis,//g; s/ pgrouting,//g' "/etc/postgresql-custom/supautils.conf" -RUN sed -i 's/\(shared_preload_libraries.*\)'\''\(.*\)$/\1, orioledb'\''\2/' "/etc/postgresql/postgresql.conf" -RUN echo "default_table_access_method = 'orioledb'" >> "/etc/postgresql/postgresql.conf" +# Remove timescaledb, plv8, postgis, pgrouting references (not available in orioledb build) +RUN sed -i 's/ timescaledb,//g;' "/etc/postgresql/postgresql.conf" && \ + sed -i 's/db_user_namespace = off/#db_user_namespace = off/g;' "/etc/postgresql/postgresql.conf" && \ + sed -i 's/ timescaledb,//g; s/ plv8,//g; s/ postgis,//g; s/ pgrouting,//g' "/etc/postgresql-custom/supautils.conf" + +# OrioleDB configuration +RUN sed -i 's/\(shared_preload_libraries.*\)'\''\(.*\)$/\1, orioledb'\''\2/' "/etc/postgresql/postgresql.conf" && \ + echo "default_table_access_method = 'orioledb'" >> "/etc/postgresql/postgresql.conf" # OrioleDB rewind configuration # Enables time-based rewind capability for up to 20 minutes (1200 seconds) @@ -247,49 +165,44 @@ RUN echo "default_table_access_method = 'orioledb'" >> "/etc/postgresql/postgres RUN echo "orioledb.enable_rewind = true" >> "/etc/postgresql/postgresql.conf" && \ echo "orioledb.rewind_max_time = 1200" >> "/etc/postgresql/postgresql.conf" && \ echo "orioledb.rewind_max_transactions = 100000" >> "/etc/postgresql/postgresql.conf" && \ - echo "orioledb.rewind_buffers = 1280" >> "/etc/postgresql/postgresql.conf" + echo "orioledb.rewind_buffers = 1280" >> "/etc/postgresql/postgresql.conf" - -# # Include schema migrations +# Include schema migrations COPY migrations/db /docker-entrypoint-initdb.d/ COPY ansible/files/pgbouncer_config/pgbouncer_auth_schema.sql /docker-entrypoint-initdb.d/init-scripts/00-schema.sql COPY ansible/files/stat_extension.sql /docker-entrypoint-initdb.d/migrations/00-extension.sql + # Enable orioledb extension first RUN echo "CREATE EXTENSION orioledb;" > /docker-entrypoint-initdb.d/init-scripts/00-pre-init.sql && \ chown postgres:postgres /docker-entrypoint-initdb.d/init-scripts/00-pre-init.sql -# # Add upstream entrypoint script pinned for now to last tested version -COPY --from=gosu /usr/local/bin/gosu /usr/local/bin/gosu +# Add entrypoint script ADD --chmod=0755 \ https://github.com/docker-library/postgres/raw/889f9447cd2dfe21cccfbe9bb7945e3b037e02d8/17/bullseye/docker-entrypoint.sh \ /usr/local/bin/docker-entrypoint.sh -RUN mkdir -p /var/run/postgresql && chown postgres:postgres /var/run/postgresql - -ENTRYPOINT ["docker-entrypoint.sh"] - -HEALTHCHECK --interval=2s --timeout=2s --retries=10 CMD pg_isready -U postgres -h localhost -STOPSIGNAL SIGINT -EXPOSE 5432 +# Setup pgsodium key script +RUN mkdir -p /usr/share/postgresql/extension/ && \ + ln -s 
/usr/lib/postgresql/bin/pgsodium_getkey.sh /usr/share/postgresql/extension/pgsodium_getkey && \ + chmod +x /usr/lib/postgresql/bin/pgsodium_getkey.sh +# Environment variables +ENV PATH="/nix/var/nix/profiles/default/bin:/usr/lib/postgresql/bin:${PATH}" +ENV PGDATA=/var/lib/postgresql/data ENV POSTGRES_HOST=/var/run/postgresql ENV POSTGRES_USER=supabase_admin ENV POSTGRES_DB=postgres ENV POSTGRES_INITDB_ARGS="--allow-group-access --locale-provider=icu --encoding=UTF-8 --icu-locale=en_US.UTF-8" -RUN bash -c 'source /usr/local/bin/apt-update-fallback.sh && apt_update_with_fallback' && apt-get install -y --no-install-recommends \ - locales \ - && rm -rf /var/lib/apt/lists/* && \ - localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8 \ - && localedef -i C -c -f UTF-8 -A /usr/share/locale/locale.alias C.UTF-8 -RUN echo "C.UTF-8 UTF-8" > /etc/locale.gen && echo "en_US.UTF-8 UTF-8" >> /etc/locale.gen && locale-gen -ENV LANG en_US.UTF-8 -ENV LANGUAGE en_US:en -ENV LC_ALL en_US.UTF-8 -ENV LOCALE_ARCHIVE /usr/lib/locale/locale-archive -RUN mkdir -p /usr/share/postgresql/extension/ && \ - ln -s /usr/lib/postgresql/bin/pgsodium_getkey.sh /usr/share/postgresql/extension/pgsodium_getkey && \ - chmod +x /usr/lib/postgresql/bin/pgsodium_getkey.sh - +ENV LANG=en_US.UTF-8 +ENV LANGUAGE=en_US:en +ENV LC_ALL=en_US.UTF-8 ENV GRN_PLUGINS_DIR=/usr/lib/groonga/plugins +# Point to minimal glibc locales included in slim Nix package for initdb locale support +ENV LOCALE_ARCHIVE=/nix/var/nix/profiles/default/lib/locale/locale-archive + +ENTRYPOINT ["docker-entrypoint.sh"] +HEALTHCHECK --interval=2s --timeout=2s --retries=10 CMD pg_isready -U postgres -h localhost +STOPSIGNAL SIGINT +EXPOSE 5432 CMD ["postgres", "-D", "/etc/postgresql"] diff --git a/ansible/vars.yml b/ansible/vars.yml index e76f8d3e7..a3c602ea6 100644 --- a/ansible/vars.yml +++ b/ansible/vars.yml @@ -10,9 +10,9 @@ postgres_major: # Full version strings for each major version postgres_release: - postgresorioledb-17: "17.6.0.036-orioledb" - postgres17: "17.6.1.079" - postgres15: "15.14.1.079" + postgresorioledb-17: "17.6.0.036-orioledb-dckr-1" + postgres17: "17.6.1.079-dckr-1" + postgres15: "15.14.1.079-dckr-1" # Non Postgres Extensions pgbouncer_release: 1.25.1 diff --git a/docs/plans/2026-01-21-psql-slim-latest-only.md b/docs/plans/2026-01-21-psql-slim-latest-only.md new file mode 100644 index 000000000..9654b2009 --- /dev/null +++ b/docs/plans/2026-01-21-psql-slim-latest-only.md @@ -0,0 +1,536 @@ +# PostgreSQL Slim Image (Latest Extensions Only) Implementation Plan + +> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task. + +**Goal:** Create a new `psql_17_slim/bin` flake output that includes only the latest version of each PostgreSQL extension, reducing image size by ~40-60%. + +**Architecture:** Add a `latestOnly` parameter to extension nix files. When true, only build the latest version instead of all versions. Create new `makePostgresBinSlim` function in postgres.nix that passes this parameter. 
+ +**Tech Stack:** Nix, flake-parts + +**Estimated size reduction:** ~700MB+ (wrappers alone has 13 versions → 1) + +--- + +## Task 1: Update postgres.nix to Support Slim Builds + +**Files:** +- Modify: `nix/packages/postgres.nix` + +**Step 1: Add latestOnly parameter to extCallPackage** + +In `makeOurPostgresPkgs`, modify the `extCallPackage` call to accept a `latestOnly` parameter: + +```nix +# Around line 94, modify makeOurPostgresPkgs to accept latestOnly parameter +makeOurPostgresPkgs = + version: + { latestOnly ? false }: + let + postgresql = getPostgresqlPackage version; + extensionsToUse = + if (builtins.elem version [ "orioledb-17" ]) then + orioledbExtensions + else if (builtins.elem version [ "17" ]) then + dbExtensions17 + else + ourExtensions; + extCallPackage = pkgs.lib.callPackageWith ( + pkgs + // { + inherit postgresql latestOnly; + switch-ext-version = extCallPackage ./switch-ext-version.nix { }; + overlayfs-on-package = extCallPackage ./overlayfs-on-package.nix { }; + } + ); + in + map (path: extCallPackage path { }) extensionsToUse; +``` + +**Step 2: Update makePostgresBin to accept latestOnly** + +```nix +# Around line 143, modify makePostgresBin +makePostgresBin = + version: + { latestOnly ? false }: + let + postgresql = getPostgresqlPackage version; + postgres-pkgs = makeOurPostgresPkgs version { inherit latestOnly; }; + ourExts = map (ext: { + name = ext.name; + version = ext.version; + }) postgres-pkgs; + + pgbin = postgresql.withPackages (_ps: postgres-pkgs); + in + pkgs.symlinkJoin { + inherit (pgbin) name version; + paths = [ + pgbin + (makeReceipt pgbin ourExts) + ]; + }; +``` + +**Step 3: Update makePostgres to accept latestOnly** + +```nix +# Around line 172, modify makePostgres +makePostgres = + version: + { latestOnly ? false }: + lib.recurseIntoAttrs { + bin = makePostgresBin version { inherit latestOnly; }; + exts = makeOurPostgresPkgsSet version; + }; +``` + +**Step 4: Add slim packages to basePackages** + +```nix +# Around line 178 +basePackages = { + psql_15 = makePostgres "15" { }; + psql_17 = makePostgres "17" { }; + psql_orioledb-17 = makePostgres "orioledb-17" { }; +}; + +slimPackages = { + psql_17_slim = makePostgres "17" { latestOnly = true; }; +}; +``` + +**Step 5: Update binPackages to include slim variants** + +```nix +# Around line 183 +binPackages = lib.mapAttrs' (name: value: { + name = "${name}/bin"; + value = value.bin; +}) (basePackages // slimPackages); +``` + +**Step 6: Commit** + +```bash +git add nix/packages/postgres.nix +git commit -m "feat(nix): add latestOnly parameter support to postgres.nix" +``` + +--- + +## Task 2: Update pgvector.nix (Template Pattern) + +**Files:** +- Modify: `nix/ext/pgvector.nix` + +This is the template pattern that will be applied to all multi-version extensions. + +**Step 1: Add latestOnly parameter to function signature** + +```nix +# Line 1-7, add latestOnly parameter +{ + pkgs, + lib, + stdenv, + fetchFromGitHub, + postgresql, + latestOnly ? 
false, +}: +``` + +**Step 2: Modify version selection to respect latestOnly** + +```nix +# After line 21 (after latestVersion = lib.last versions;) +# Replace: +# packages = builtins.attrValues ( +# lib.mapAttrs (name: value: build name value.hash) supportedVersions +# ); +# With: +versionsToUse = if latestOnly + then { "${latestVersion}" = supportedVersions.${latestVersion}; } + else supportedVersions; +packages = builtins.attrValues ( + lib.mapAttrs (name: value: build name value.hash) versionsToUse +); +versionsBuilt = if latestOnly then [ latestVersion ] else versions; +numberOfVersionsBuilt = builtins.length versionsBuilt; +``` + +**Step 3: Update passthru to reflect actual versions built** + +```nix +# Around line 85-91, update passthru +passthru = { + versions = versionsBuilt; + numberOfVersions = numberOfVersionsBuilt; + inherit pname latestOnly; + version = if latestOnly + then latestVersion + else "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." ] [ "-" ] v) versions); + pgRegressTestName = "pgvector"; +}; +``` + +**Step 4: Commit** + +```bash +git add nix/ext/pgvector.nix +git commit -m "feat(ext): add latestOnly support to pgvector" +``` + +--- + +## Task 3: Update wrappers/default.nix + +**Files:** +- Modify: `nix/ext/wrappers/default.nix` + +This is the most complex extension with migration SQL files. Since we don't need migrations for slim, simplify significantly. + +**Step 1: Add latestOnly parameter** + +```nix +# Line 1-12, add latestOnly parameter +{ + lib, + stdenv, + callPackages, + fetchFromGitHub, + openssl, + pkg-config, + postgresql, + buildEnv, + rust-bin, + git, + latestOnly ? false, +}: +``` + +**Step 2: Modify version selection** + +```nix +# After line 208 (after latestVersion = lib.last versions;) +versionsToUse = if latestOnly + then lib.filterAttrs (n: _: n == latestVersion) supportedVersions + else supportedVersions; +versionsBuilt = if latestOnly then [ latestVersion ] else versions; +numberOfVersionsBuilt = builtins.length versionsBuilt; + +# Update packagesAttrSet to use versionsToUse +packagesAttrSet = lib.mapAttrs' (name: value: { + name = lib.replaceStrings [ "." ] [ "_" ] name; + value = build name value.hash value.rust value.pgrx; +}) versionsToUse; +``` + +**Step 3: Simplify postBuild for latestOnly** + +```nix +# Around line 229, modify postBuild to skip migrations when latestOnly +postBuild = '' + create_control_files() { + { + echo "default_version = '${latestVersion}'" + cat $out/share/postgresql/extension/${pname}--${latestVersion}.control + } > $out/share/postgresql/extension/${pname}.control + } + + create_lib_files() { + ln -sfn ${pname}-${latestVersion}${postgresql.dlSuffix} $out/lib/${pname}${postgresql.dlSuffix} + ${lib.optionalString (!latestOnly) '' + # Create symlinks for all previously packaged versions to main library + for v in ${lib.concatStringsSep " " previouslyPackagedVersions}; do + ln -sfn $out/lib/${pname}${postgresql.dlSuffix} $out/lib/${pname}-$v${postgresql.dlSuffix} + done + ''} + } + + ${lib.optionalString (!latestOnly) '' + create_migration_sql_files() { + # ... existing migration logic ... 
+ } + ''} + + create_control_files + create_lib_files + ${lib.optionalString (!latestOnly) "create_migration_sql_files"} + + # Verify library count matches expected + ${if latestOnly then '' + (test "$(ls -A $out/lib/${pname}*${postgresql.dlSuffix} | wc -l)" = "2") + '' else '' + (test "$(ls -A $out/lib/${pname}*${postgresql.dlSuffix} | wc -l)" = "${ + toString (numberOfVersions + numberOfPreviouslyPackagedVersions + 1) + }") + ''} +''; +``` + +**Step 4: Update passthru** + +```nix +passthru = { + versions = versionsBuilt; + numberOfVersions = numberOfVersionsBuilt; + pname = "${pname}"; + inherit latestOnly; + version = if latestOnly + then latestVersion + else "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." ] [ "-" ] v) versions); + packages = packagesAttrSet // { + recurseForDerivations = true; + }; +}; +``` + +**Step 5: Commit** + +```bash +git add nix/ext/wrappers/default.nix +git commit -m "feat(ext): add latestOnly support to wrappers" +``` + +--- + +## Task 4: Update pg_graphql/default.nix + +**Files:** +- Modify: `nix/ext/pg_graphql/default.nix` + +**Step 1: Add latestOnly parameter and modify version selection** + +Apply the same pattern as pgvector: +1. Add `latestOnly ? false` to function parameters +2. Create `versionsToUse` filtered by latestOnly +3. Update packages list to use versionsToUse +4. Update passthru + +**Step 2: Commit** + +```bash +git add nix/ext/pg_graphql/default.nix +git commit -m "feat(ext): add latestOnly support to pg_graphql" +``` + +--- + +## Task 5: Update pg_net.nix + +**Files:** +- Modify: `nix/ext/pg_net.nix` + +Apply the same pattern as pgvector. + +**Step 1: Add latestOnly parameter and modify version selection** + +**Step 2: Update library count check for latestOnly** + +```nix +# In postBuild, update the check +${if latestOnly then '' + (test "$(ls -A $out/lib/${pname}*${postgresql.dlSuffix} | wc -l)" = "2") +'' else '' + (test "$(ls -A $out/lib/${pname}*${postgresql.dlSuffix} | wc -l)" = "${ + toString (numberOfVersions + 1) + }") +''} +``` + +**Step 3: Commit** + +```bash +git add nix/ext/pg_net.nix +git commit -m "feat(ext): add latestOnly support to pg_net" +``` + +--- + +## Task 6: Update pgsodium.nix + +**Files:** +- Modify: `nix/ext/pgsodium.nix` + +Apply the same pattern as pgvector. + +**Commit:** +```bash +git add nix/ext/pgsodium.nix +git commit -m "feat(ext): add latestOnly support to pgsodium" +``` + +--- + +## Task 7: Update pgaudit.nix + +**Files:** +- Modify: `nix/ext/pgaudit.nix` + +Apply the same pattern as pgvector. + +**Commit:** +```bash +git add nix/ext/pgaudit.nix +git commit -m "feat(ext): add latestOnly support to pgaudit" +``` + +--- + +## Task 8: Update pg_jsonschema/default.nix + +**Files:** +- Modify: `nix/ext/pg_jsonschema/default.nix` + +Apply the same pattern as pgvector. 
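+
+The shared shape of the change, condensed (an editor's sketch, not copy-paste code; `supportedVersions`, `versions`, and `latestVersion` are the bindings these extension files already define):
+
+```nix
+# Sketch: the latestOnly filter each multi-version extension adopts.
+{ lib, supportedVersions, latestOnly ? false }:
+let
+  versions = lib.naturalSort (builtins.attrNames supportedVersions);
+  latestVersion = lib.last versions;
+  versionsToUse =
+    if latestOnly then
+      { "${latestVersion}" = supportedVersions.${latestVersion}; }
+    else
+      supportedVersions;
+in
+{
+  inherit versionsToUse;
+  versionsBuilt = if latestOnly then [ latestVersion ] else versions;
+}
+```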
+ +**Commit:** +```bash +git add nix/ext/pg_jsonschema/default.nix +git commit -m "feat(ext): add latestOnly support to pg_jsonschema" +``` + +--- + +## Task 9: Update Remaining Multi-Version Extensions + +Apply the same pattern to these extensions (4 or fewer versions each): + +**Files:** +- `nix/ext/pg_cron/default.nix` +- `nix/ext/pg_repack.nix` +- `nix/ext/pg_tle.nix` +- `nix/ext/plv8/default.nix` +- `nix/ext/pgsql-http.nix` +- `nix/ext/hypopg.nix` +- `nix/ext/pgmq/default.nix` +- `nix/ext/pgroonga/default.nix` +- `nix/ext/pgrouting/default.nix` +- `nix/ext/vault.nix` +- `nix/ext/rum.nix` +- `nix/ext/postgis.nix` +- `nix/ext/supautils.nix` + +For single-version extensions, just add the parameter with no-op behavior: +```nix +latestOnly ? false, # unused, for API compatibility +``` + +**Commit:** +```bash +git add nix/ext/ +git commit -m "feat(ext): add latestOnly support to remaining extensions" +``` + +--- + +## Task 10: Update Dockerfile-17 to Use Slim Package + +**Files:** +- Modify: `Dockerfile-17` + +**Step 1: Change nix profile add command** + +Find the line: +```dockerfile +RUN nix profile add path:.#psql_17/bin +``` + +Change to: +```dockerfile +RUN nix profile add path:.#psql_17_slim/bin +``` + +**Step 2: Commit** + +```bash +git add Dockerfile-17 +git commit -m "feat(docker): use psql_17_slim for smaller image size" +``` + +--- + +## Task 11: Test and Verify + +**Step 1: Build the slim package** + +```bash +nix build .#psql_17_slim/bin +``` + +Expected: Build succeeds with only latest versions. + +**Step 2: Verify extension count** + +```bash +ls -la result/lib/*.so | wc -l +``` + +Expected: Significantly fewer .so files than full build. + +**Step 3: Verify receipt.json** + +```bash +cat result/receipt.json | jq '.extensions | length' +``` + +Expected: Same number of extensions, but each with single version. + +**Step 4: Build Docker image and compare size** + +```bash +nix run .#image-size-analyzer -- --image Dockerfile-17 +``` + +Expected: Total size reduced by 40-60%. + +**Step 5: Commit any fixes** + +--- + +## Task 12: Update Documentation + +**Files:** +- Modify: `nix/docs/image-size-analyzer-usage.md` + +Add section explaining the slim vs full packages: + +```markdown +## Package Variants + +### Full Package (`psql_17/bin`) +Includes all versions of each extension for migration support. +Use for: Production databases that need `ALTER EXTENSION ... UPDATE`. + +### Slim Package (`psql_17_slim/bin`) +Includes only the latest version of each extension. +Use for: CI/CD, testing, new deployments without migration needs. +Typical size savings: 40-60% smaller. 
+``` + +**Commit:** +```bash +git add nix/docs/ +git commit -m "docs: document slim vs full package variants" +``` + +--- + +## Summary + +| Task | Files Modified | Estimated Savings | +|------|---------------|-------------------| +| 1 | postgres.nix | - | +| 2 | pgvector.nix | ~100MB | +| 3 | wrappers/default.nix | ~700MB | +| 4 | pg_graphql/default.nix | ~200MB | +| 5 | pg_net.nix | ~150MB | +| 6 | pgsodium.nix | ~50MB | +| 7 | pgaudit.nix | ~30MB | +| 8 | pg_jsonschema/default.nix | ~30MB | +| 9 | Remaining extensions | ~100MB | +| 10 | Dockerfile-17 | - | + +**Total estimated savings: 1.2-1.5 GB** diff --git a/docs/plans/2026-01-30-pg-startup-profiler-design.md b/docs/plans/2026-01-30-pg-startup-profiler-design.md new file mode 100644 index 000000000..e83ed3e81 --- /dev/null +++ b/docs/plans/2026-01-30-pg-startup-profiler-design.md @@ -0,0 +1,240 @@ +# pg-startup-profiler Design + +## Overview + +A Go tool for profiling PostgreSQL container startup time with detailed breakdown of what contributes to startup latency. + +## Goals + +- Measure total startup time (what users perceive: container start → ready for connections) +- Provide detailed breakdown: init scripts, migrations, extensions, background workers +- Non-intrusive: no modifications to container images +- Accurate timing using eBPF tracing + PostgreSQL log timestamps +- Pluggable log pattern matching for flexibility + +## Architecture + +``` +┌─────────────────────────────────────────────────────────────┐ +│ pg-startup-profiler │ +├─────────────────────────────────────────────────────────────┤ +│ │ +│ 1. Docker Client │ +│ └── Start container │ +│ └── Get container's cgroup ID for filtering │ +│ │ +│ 2. eBPF Probes (attached to kernel) │ +│ ├── execve → every process/command executed │ +│ ├── openat → every file opened (SQL, .so, config) │ +│ │ +│ 3. Log Stream Parser │ +│ └── Attach to container stdout/stderr │ +│ └── Match configurable patterns │ +│ └── Extract PostgreSQL timestamps (accurate) │ +│ │ +│ 4. Event Filter │ +│ └── Filter eBPF events to container's cgroup │ +│ │ +│ 5. Timeline Builder │ +│ └── Correlate all events into unified timeline │ +│ └── Group into phases │ +│ │ +│ 6. Reporter │ +│ └── CLI table / JSON output │ +│ │ +└─────────────────────────────────────────────────────────────┘ +``` + +## eBPF Probes + +| Probe | Syscall | What we capture | +|-------|---------|-----------------| +| `tracepoint/syscalls/sys_enter_execve` | execve | Command, args, timestamp | +| `tracepoint/syscalls/sys_enter_openat` | openat | File path, timestamp | + +Events are filtered by cgroup ID to only capture activity from the target container. 
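+
+For reference, on cgroup v2 the value returned by the kernel's `bpf_get_current_cgroup_id()` helper is the inode number of the container's cgroup directory, so the filter ID can be resolved from userspace with a plain `stat`. A minimal sketch (the systemd-style Docker cgroup path is an assumption that varies by host):
+
+```go
+package main
+
+import (
+	"fmt"
+	"syscall"
+)
+
+// cgroupIDForContainer resolves the cgroup v2 ID used to filter eBPF events.
+// Sketch only: assumes Docker's systemd cgroup driver layout.
+func cgroupIDForContainer(containerID string) (uint64, error) {
+	path := fmt.Sprintf("/sys/fs/cgroup/system.slice/docker-%s.scope", containerID)
+	var st syscall.Stat_t
+	if err := syscall.Stat(path, &st); err != nil {
+		return 0, err
+	}
+	return st.Ino, nil // the inode number is the cgroup ID seen in-kernel
+}
+```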
+
+## Pluggable Log Rules
+
+Rules are defined in YAML for matching PostgreSQL log patterns:
+
+```yaml
+patterns:
+  - name: "initdb_start"
+    regex: 'running bootstrap script'
+
+  - name: "initdb_complete"
+    regex: 'syncing data to disk'
+
+  - name: "temp_server_start"
+    regex: 'database system is ready to accept connections'
+    occurrence: 1
+
+  - name: "server_shutdown"
+    regex: 'database system is shut down'
+
+  - name: "final_server_ready"
+    regex: 'database system is ready to accept connections'
+    occurrence: 2
+    marks_ready: true  # This indicates container is ready
+
+  - name: "extension_load"
+    regex: 'CREATE EXTENSION.*(?P<extension>\w+)'
+    capture: extension
+
+  - name: "bgworker_start"
+    regex: '(?P<worker>pg_cron|pg_net).*started'
+    capture: worker
+
+timestamp:
+  regex: '^(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{3} \w+)'
+  format: "2006-01-02 15:04:05.000 MST"
+```
+
+## Output Formats
+
+### CLI Table (default)
+
+```
+════════════════════════════════════════════════════════════════════════════════
+PostgreSQL Container Startup Profile
+════════════════════════════════════════════════════════════════════════════════
+
+Image: pg-docker-test:17
+Total: 8.234s
+
+PHASES
+────────────────────────────────────────────────────────────────────────────────
+  Phase                          Duration      Pct
+  ─────────────────────────────────────────────────
+  Container init                 0.143s       1.7%
+  initdb                         2.535s      30.8%
+  Init scripts                   4.912s      59.6%
+  Final server start             0.644s       7.8%
+
+INIT SCRIPTS (top 5 by duration)
+────────────────────────────────────────────────────────────────────────────────
+  Script                                             Duration
+  ────────────────────────────────────────────────────────
+  migrations/00-schema.sql                           1.203s
+  migrations/20211115181400_auth-schema.sql          0.892s
+  migrations/20230201034123_extensions.sql           0.445s
+  ...
+
+EXTENSIONS
+────────────────────────────────────────────────────────────────────────────────
+  Extension            Load time
+  ──────────────────────────────
+  vector               0.245s
+  pgsodium             0.189s
+  pg_graphql           0.156s
+  ...
+
+BACKGROUND WORKERS
+────────────────────────────────────────────────────────────────────────────────
+  Worker               Started at
+  ──────────────────────────────
+  pg_cron              8.198s
+  pg_net               8.212s
+```
+
+### JSON (`--json`)
+
+```json
+{
+  "image": "pg-docker-test:17",
+  "total_duration_ms": 8234,
+  "phases": {
+    "container_init": {"duration_ms": 143, "pct": 1.7},
+    "initdb": {"duration_ms": 2535, "pct": 30.8},
+    "init_scripts": {"duration_ms": 4912, "pct": 59.6},
+    "final_server_start": {"duration_ms": 644, "pct": 7.8}
+  },
+  "init_scripts": [...],
+  "extensions": [...],
+  "events": [...]
+}
+```
+
+## CLI Interface
+
+```bash
+# Profile a Dockerfile (builds and profiles)
+pg-startup-profiler --dockerfile Dockerfile-17
+
+# Profile existing image
+pg-startup-profiler --image pg-docker-test:17
+
+# JSON output for CI
+pg-startup-profiler --image pg-docker-test:17 --json
+
+# Custom rules file
+pg-startup-profiler --image pg-docker-test:17 --rules my-rules.yaml
+
+# Verbose (include full event timeline)
+pg-startup-profiler --image pg-docker-test:17 --verbose
+
+# Compare two images
+pg-startup-profiler compare --baseline pg-docker-test:17 --candidate pg-docker-test:17-slim
+```
+
+## Project Structure
+
+```
+nix/packages/pg-startup-profiler/
+├── cmd/
+│   └── pg-startup-profiler/
+│       └── main.go              # Cobra CLI entry point
+├── internal/
+│   ├── docker/
+│   │   └── client.go            # Docker API interactions
+│   ├── ebpf/
+│   │   ├── bpf_bpfel.go         # Generated eBPF Go bindings
+│   │   ├── bpf_bpfel.o          # Compiled eBPF bytecode
+│   │   ├── probes.c             # eBPF programs (C)
+│   │   └── tracer.go            # Go wrapper for eBPF
+│   ├── logs/
+│   │   └── parser.go            # Log stream + pattern matching
+│   ├── rules/
+│   │   ├── rules.go             # YAML rule loading
+│   │   └── default.go           # Embedded default rules
+│   └── report/
+│       ├── timeline.go          # Event correlation
+│       ├── table.go             # CLI table output
+│       └── json.go              # JSON output
+├── rules/
+│   └── default.yaml             # Default PostgreSQL patterns
+├── go.mod
+├── go.sum
+└── README.md
+```
+
+## Nix Integration
+
+Package definition follows existing patterns (like supascan):
+- `nix/packages/pg-startup-profiler.nix` - build definition
+- Registered in `nix/packages/default.nix`
+- Added to `nix/apps.nix` for `nix run`
+
+## Requirements
+
+- Linux only (eBPF requirement)
+- Elevated privileges (CAP_BPF or root) for eBPF tracing
+- Docker daemon access
+
+## Dependencies
+
+- Go 1.23+ (matching the implementation plan and go.mod)
+- cilium/ebpf (pure Go eBPF library)
+- spf13/cobra (CLI framework)
+- docker/docker (Docker API client)
+- gopkg.in/yaml.v3 (YAML parsing)
+
+## Safety Considerations
+
+The tool is safe for testing contexts:
+- eBPF probes are read-only observers
+- No modifications to container images
+- No injection into containers
+- Container runs exactly as it would in production
+- Only runs during explicit profiling
diff --git a/docs/plans/2026-01-30-pg-startup-profiler-implementation.md b/docs/plans/2026-01-30-pg-startup-profiler-implementation.md
new file mode 100644
index 000000000..b73809b1a
--- /dev/null
+++ b/docs/plans/2026-01-30-pg-startup-profiler-implementation.md
@@ -0,0 +1,1691 @@
+# pg-startup-profiler Implementation Plan
+
+> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task.
+
+**Goal:** Build a Go tool that profiles PostgreSQL container startup time using eBPF tracing and log parsing.
+
+**Architecture:** Docker API for container lifecycle, eBPF probes (execve/openat) filtered by cgroup for syscall tracing, log stream parsing with configurable YAML rules for PostgreSQL events, and a unified timeline correlating all events.
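+
+At a glance, the wiring the tasks below build (an editor's sketch using the package names created in Tasks 2-9, not final code):
+
+```go
+// Sketch: log lines flow through the rule-driven parser into the timeline.
+events := make(chan logs.Event, 100)
+go dockerClient.StreamLogs(ctx, containerID, func(line string, _ time.Time) {
+	parser.ParseLine(line, events) // emits an Event when a YAML rule matches
+})
+for ev := range events {
+	timeline.AddEvent(report.Event{Name: ev.Name, Timestamp: ev.Timestamp})
+	if ev.MarksReady {
+		break // "ready" pattern seen: startup measurement complete
+	}
+}
+timeline.Finalize() // sort, correlate, and derive phase durations
+```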
+ +**Tech Stack:** Go 1.23+, cilium/ebpf, spf13/cobra, docker/docker client, gopkg.in/yaml.v3, charmbracelet/log + +--- + +## Task 1: Project Scaffolding + +**Files:** +- Create: `nix/packages/pg-startup-profiler/go.mod` +- Create: `nix/packages/pg-startup-profiler/cmd/pg-startup-profiler/main.go` +- Create: `nix/packages/pg-startup-profiler/internal/logger/logger.go` + +**Step 1: Create directory structure** + +```bash +mkdir -p nix/packages/pg-startup-profiler/cmd/pg-startup-profiler +mkdir -p nix/packages/pg-startup-profiler/internal/logger +mkdir -p nix/packages/pg-startup-profiler/internal/docker +mkdir -p nix/packages/pg-startup-profiler/internal/ebpf +mkdir -p nix/packages/pg-startup-profiler/internal/logs +mkdir -p nix/packages/pg-startup-profiler/internal/rules +mkdir -p nix/packages/pg-startup-profiler/internal/report +mkdir -p nix/packages/pg-startup-profiler/rules +``` + +**Step 2: Create go.mod** + +```go +// nix/packages/pg-startup-profiler/go.mod +module github.com/supabase/pg-startup-profiler + +go 1.23.0 + +require ( + github.com/charmbracelet/log v0.4.2 + github.com/cilium/ebpf v0.17.3 + github.com/docker/docker v27.5.1+incompatible + github.com/spf13/cobra v1.8.1 + gopkg.in/yaml.v3 v3.0.1 +) +``` + +**Step 3: Create logger (matching supascan pattern)** + +```go +// nix/packages/pg-startup-profiler/internal/logger/logger.go +package logger + +import ( + "io" + "os" + + "github.com/charmbracelet/log" +) + +func Setup(verbose, debug bool) *log.Logger { + var output io.Writer = io.Discard + var level log.Level = log.InfoLevel + + if debug { + output = os.Stderr + level = log.DebugLevel + } else if verbose { + output = os.Stderr + level = log.InfoLevel + } + + return log.NewWithOptions(output, log.Options{ + Level: level, + ReportTimestamp: debug, + }) +} +``` + +**Step 4: Create main.go with root command** + +```go +// nix/packages/pg-startup-profiler/cmd/pg-startup-profiler/main.go +package main + +import ( + "os" + + "github.com/spf13/cobra" +) + +var ( + version = "dev" +) + +var rootCmd = &cobra.Command{ + Use: "pg-startup-profiler", + Short: "PostgreSQL container startup profiler", + Long: `pg-startup-profiler - Profile PostgreSQL container startup time + +A tool for measuring and analyzing PostgreSQL container startup performance +using eBPF tracing and log parsing. 
+ +Commands: + profile Profile a container's startup time + compare Compare startup times between two images + +Examples: + # Profile a Dockerfile + pg-startup-profiler profile --dockerfile Dockerfile-17 + + # Profile existing image + pg-startup-profiler profile --image pg-docker-test:17 + + # JSON output + pg-startup-profiler profile --image pg-docker-test:17 --json + + # Compare two images + pg-startup-profiler compare --baseline img:v1 --candidate img:v2 +`, + Version: version, +} + +func main() { + if err := rootCmd.Execute(); err != nil { + os.Exit(1) + } +} +``` + +**Step 5: Commit** + +```bash +git add nix/packages/pg-startup-profiler/ +git commit -m "feat(pg-startup-profiler): scaffold project structure" +``` + +--- + +## Task 2: Rules System + +**Files:** +- Create: `nix/packages/pg-startup-profiler/internal/rules/rules.go` +- Create: `nix/packages/pg-startup-profiler/internal/rules/rules_test.go` +- Create: `nix/packages/pg-startup-profiler/rules/default.yaml` + +**Step 1: Write failing test for rules loading** + +```go +// nix/packages/pg-startup-profiler/internal/rules/rules_test.go +package rules + +import ( + "testing" +) + +func TestLoadRules(t *testing.T) { + yaml := ` +patterns: + - name: "test_pattern" + regex: 'database system is ready' + marks_ready: true + +timestamp: + regex: '^(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{3} \w+)' + format: "2006-01-02 15:04:05.000 MST" +` + rules, err := LoadFromYAML([]byte(yaml)) + if err != nil { + t.Fatalf("failed to load rules: %v", err) + } + + if len(rules.Patterns) != 1 { + t.Errorf("expected 1 pattern, got %d", len(rules.Patterns)) + } + + if rules.Patterns[0].Name != "test_pattern" { + t.Errorf("expected name 'test_pattern', got '%s'", rules.Patterns[0].Name) + } + + if !rules.Patterns[0].MarksReady { + t.Error("expected marks_ready to be true") + } +} + +func TestPatternMatch(t *testing.T) { + yaml := ` +patterns: + - name: "ready" + regex: 'database system is ready to accept connections' + marks_ready: true + +timestamp: + regex: '^(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{3} \w+)' + format: "2006-01-02 15:04:05.000 MST" +` + rules, _ := LoadFromYAML([]byte(yaml)) + + line := "2026-01-30 13:18:21.286 UTC [41] LOG: database system is ready to accept connections" + match := rules.Match(line) + + if match == nil { + t.Fatal("expected match, got nil") + } + + if match.Pattern.Name != "ready" { + t.Errorf("expected pattern 'ready', got '%s'", match.Pattern.Name) + } + + if match.Timestamp.IsZero() { + t.Error("expected timestamp to be parsed") + } +} +``` + +**Step 2: Run test to verify it fails** + +```bash +cd nix/packages/pg-startup-profiler && go test ./internal/rules/... 
-v +``` + +Expected: FAIL - package not found + +**Step 3: Implement rules system** + +```go +// nix/packages/pg-startup-profiler/internal/rules/rules.go +package rules + +import ( + "regexp" + "time" + + "gopkg.in/yaml.v3" +) + +type Pattern struct { + Name string `yaml:"name"` + Regex string `yaml:"regex"` + Occurrence int `yaml:"occurrence,omitempty"` + MarksReady bool `yaml:"marks_ready,omitempty"` + Capture string `yaml:"capture,omitempty"` + + compiled *regexp.Regexp + seen int +} + +type TimestampConfig struct { + Regex string `yaml:"regex"` + Format string `yaml:"format"` + + compiled *regexp.Regexp +} + +type Rules struct { + Patterns []*Pattern `yaml:"patterns"` + Timestamp TimestampConfig `yaml:"timestamp"` +} + +type Match struct { + Pattern *Pattern + Timestamp time.Time + Captures map[string]string + Line string +} + +func LoadFromYAML(data []byte) (*Rules, error) { + var rules Rules + if err := yaml.Unmarshal(data, &rules); err != nil { + return nil, err + } + + // Compile patterns + for _, p := range rules.Patterns { + compiled, err := regexp.Compile(p.Regex) + if err != nil { + return nil, err + } + p.compiled = compiled + if p.Occurrence == 0 { + p.Occurrence = 1 + } + } + + // Compile timestamp regex + if rules.Timestamp.Regex != "" { + compiled, err := regexp.Compile(rules.Timestamp.Regex) + if err != nil { + return nil, err + } + rules.Timestamp.compiled = compiled + } + + return &rules, nil +} + +func (r *Rules) Match(line string) *Match { + for _, p := range r.Patterns { + if p.compiled.MatchString(line) { + p.seen++ + if p.seen == p.Occurrence { + match := &Match{ + Pattern: p, + Line: line, + Captures: make(map[string]string), + } + + // Extract timestamp + if r.Timestamp.compiled != nil { + if ts := r.Timestamp.compiled.FindStringSubmatch(line); len(ts) > 1 { + if t, err := time.Parse(r.Timestamp.Format, ts[1]); err == nil { + match.Timestamp = t + } + } + } + + // Extract named captures + if p.Capture != "" { + names := p.compiled.SubexpNames() + matches := p.compiled.FindStringSubmatch(line) + for i, name := range names { + if name != "" && i < len(matches) { + match.Captures[name] = matches[i] + } + } + } + + return match + } + } + } + return nil +} + +func (r *Rules) Reset() { + for _, p := range r.Patterns { + p.seen = 0 + } +} +``` + +**Step 4: Run test to verify it passes** + +```bash +cd nix/packages/pg-startup-profiler && go test ./internal/rules/... 
-v
+```
+
+Expected: PASS
+
+**Step 5: Create default rules**
+
+```yaml
+# nix/packages/pg-startup-profiler/rules/default.yaml
+patterns:
+  - name: "initdb_start"
+    regex: 'running bootstrap script'
+
+  - name: "initdb_complete"
+    regex: 'syncing data to disk'
+
+  - name: "temp_server_start"
+    regex: 'database system is ready to accept connections'
+    occurrence: 1
+
+  - name: "server_shutdown"
+    regex: 'database system is shut down'
+
+  - name: "final_server_ready"
+    regex: 'database system is ready to accept connections'
+    occurrence: 2
+    marks_ready: true
+
+  - name: "extension_load"
+    regex: 'statement: CREATE EXTENSION.*"(?P<extension>[^"]+)"'
+    capture: extension
+
+  - name: "bgworker_start"
+    regex: '(?P<worker>pg_cron|pg_net).*started'
+    capture: worker
+
+  - name: "migration_file"
+    regex: 'running (?P<file>/docker-entrypoint-initdb\.d/[^\s]+)'
+    capture: file
+
+timestamp:
+  regex: '(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{3} \w+)'
+  format: "2006-01-02 15:04:05.000 MST"
+```
+
+**Step 6: Commit**
+
+```bash
+git add nix/packages/pg-startup-profiler/internal/rules/ nix/packages/pg-startup-profiler/rules/
+git commit -m "feat(pg-startup-profiler): add pluggable rules system"
+```
+
+---
+
+## Task 3: Docker Client
+
+**Files:**
+- Create: `nix/packages/pg-startup-profiler/internal/docker/client.go`
+- Create: `nix/packages/pg-startup-profiler/internal/docker/client_test.go`
+
+**Step 1: Write failing test**
+
+```go
+// nix/packages/pg-startup-profiler/internal/docker/client_test.go
+package docker
+
+import (
+	"testing"
+)
+
+func TestNewClient(t *testing.T) {
+	client, err := NewClient()
+	if err != nil {
+		t.Skipf("Docker not available: %v", err)
+	}
+	defer client.Close()
+
+	if client.cli == nil {
+		t.Error("expected client to be initialized")
+	}
+}
+```
+
+**Step 2: Run test to verify it fails**
+
+```bash
+cd nix/packages/pg-startup-profiler && go test ./internal/docker/... 
-v +``` + +**Step 3: Implement Docker client** + +```go +// nix/packages/pg-startup-profiler/internal/docker/client.go +package docker + +import ( + "bufio" + "context" + "fmt" + "io" + "time" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/image" + "github.com/docker/docker/client" + "github.com/docker/docker/pkg/stdcopy" +) + +type Client struct { + cli *client.Client +} + +type ContainerInfo struct { + ID string + CgroupID uint64 + StartTime time.Time +} + +func NewClient() (*Client, error) { + cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) + if err != nil { + return nil, fmt.Errorf("failed to create docker client: %w", err) + } + return &Client{cli: cli}, nil +} + +func (c *Client) Close() error { + return c.cli.Close() +} + +func (c *Client) ImageExists(ctx context.Context, imageName string) (bool, error) { + _, _, err := c.cli.ImageInspectWithRaw(ctx, imageName) + if err != nil { + if client.IsErrNotFound(err) { + return false, nil + } + return false, err + } + return true, nil +} + +func (c *Client) BuildImage(ctx context.Context, dockerfile, contextPath, tag string) error { + // Implementation for building from Dockerfile + // Uses docker build API + return fmt.Errorf("not implemented - use docker build externally") +} + +func (c *Client) CreateContainer(ctx context.Context, imageName string, env []string) (*ContainerInfo, error) { + resp, err := c.cli.ContainerCreate(ctx, &container.Config{ + Image: imageName, + Env: env, + }, &container.HostConfig{}, nil, nil, "") + if err != nil { + return nil, fmt.Errorf("failed to create container: %w", err) + } + + return &ContainerInfo{ + ID: resp.ID, + }, nil +} + +func (c *Client) StartContainer(ctx context.Context, containerID string) (time.Time, error) { + startTime := time.Now() + if err := c.cli.ContainerStart(ctx, containerID, container.StartOptions{}); err != nil { + return time.Time{}, fmt.Errorf("failed to start container: %w", err) + } + return startTime, nil +} + +func (c *Client) StopContainer(ctx context.Context, containerID string) error { + timeout := 10 + return c.cli.ContainerStop(ctx, containerID, container.StopOptions{Timeout: &timeout}) +} + +func (c *Client) RemoveContainer(ctx context.Context, containerID string) error { + return c.cli.ContainerRemove(ctx, containerID, container.RemoveOptions{Force: true}) +} + +func (c *Client) GetContainerCgroupID(ctx context.Context, containerID string) (uint64, error) { + inspect, err := c.cli.ContainerInspect(ctx, containerID) + if err != nil { + return 0, err + } + // The cgroup path contains the container ID + // We need to get the cgroup inode for eBPF filtering + // This is platform-specific and may need adjustment + _ = inspect + return 0, fmt.Errorf("cgroup ID extraction not implemented") +} + +func (c *Client) StreamLogs(ctx context.Context, containerID string, callback func(line string, timestamp time.Time)) error { + options := container.LogsOptions{ + ShowStdout: true, + ShowStderr: true, + Follow: true, + Timestamps: true, + } + + reader, err := c.cli.ContainerLogs(ctx, containerID, options) + if err != nil { + return err + } + defer reader.Close() + + // Docker multiplexes stdout/stderr, need to demux + pr, pw := io.Pipe() + go func() { + stdcopy.StdCopy(pw, pw, reader) + pw.Close() + }() + + scanner := bufio.NewScanner(pr) + for scanner.Scan() { + line := scanner.Text() + // Docker prepends timestamp when Timestamps: true + callback(line, time.Now()) + } + + return scanner.Err() +} + 
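+// NOTE (editor's sketch): with Timestamps: true, Docker prefixes each line
+// with an RFC3339Nano timestamp. Parsing that prefix is more accurate than
+// the time.Now() passed to the callback above. parseDockerTimestamp is a
+// hypothetical helper, not part of the plan's committed API.
+func parseDockerTimestamp(line string) (time.Time, string) {
+	// Docker's timestamp ends at the first space.
+	for i := 0; i < len(line); i++ {
+		if line[i] == ' ' {
+			if ts, err := time.Parse(time.RFC3339Nano, line[:i]); err == nil {
+				return ts, line[i+1:]
+			}
+			break
+		}
+	}
+	return time.Now(), line // fall back to receive time
+}
+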
+func (c *Client) PullImage(ctx context.Context, imageName string) error {
+	reader, err := c.cli.ImagePull(ctx, imageName, image.PullOptions{})
+	if err != nil {
+		return err
+	}
+	defer reader.Close()
+	io.Copy(io.Discard, reader)
+	return nil
+}
+```
+
+**Step 4: Run test**
+
+```bash
+cd nix/packages/pg-startup-profiler && go test ./internal/docker/... -v
+```
+
+**Step 5: Commit**
+
+```bash
+git add nix/packages/pg-startup-profiler/internal/docker/
+git commit -m "feat(pg-startup-profiler): add Docker client wrapper"
+```
+
+---
+
+## Task 4: Log Parser
+
+**Files:**
+- Create: `nix/packages/pg-startup-profiler/internal/logs/parser.go`
+- Create: `nix/packages/pg-startup-profiler/internal/logs/parser_test.go`
+
+**Step 1: Write failing test**
+
+```go
+// nix/packages/pg-startup-profiler/internal/logs/parser_test.go
+package logs
+
+import (
+	"testing"
+
+	"github.com/supabase/pg-startup-profiler/internal/rules"
+)
+
+func TestParser(t *testing.T) {
+	rulesYAML := `
+patterns:
+  - name: "ready"
+    regex: 'database system is ready to accept connections'
+    marks_ready: true
+
+timestamp:
+  regex: '(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{3} \w+)'
+  format: "2006-01-02 15:04:05.000 MST"
+`
+	r, _ := rules.LoadFromYAML([]byte(rulesYAML))
+	parser := NewParser(r)
+
+	events := make(chan Event, 10)
+	go func() {
+		parser.ParseLine("2026-01-30 13:18:21.286 UTC [41] LOG: database system is ready to accept connections", events)
+		close(events)
+	}()
+
+	event := <-events
+	if event.Name != "ready" {
+		t.Errorf("expected event name 'ready', got '%s'", event.Name)
+	}
+
+	if event.MarksReady != true {
+		t.Error("expected event to mark ready")
+	}
+}
+```
+
+**Step 2: Run test to verify it fails**
+
+```bash
+cd nix/packages/pg-startup-profiler && go test ./internal/logs/... -v
+```
+
+**Step 3: Implement log parser**
+
+```go
+// nix/packages/pg-startup-profiler/internal/logs/parser.go
+package logs
+
+import (
+	"time"
+
+	"github.com/supabase/pg-startup-profiler/internal/rules"
+)
+
+type Event struct {
+	Name       string
+	Timestamp  time.Time
+	Captures   map[string]string
+	Line       string
+	MarksReady bool
+}
+
+type Parser struct {
+	rules *rules.Rules
+}
+
+func NewParser(r *rules.Rules) *Parser {
+	return &Parser{rules: r}
+}
+
+func (p *Parser) ParseLine(line string, events chan<- Event) {
+	match := p.rules.Match(line)
+	if match != nil {
+		events <- Event{
+			Name:       match.Pattern.Name,
+			Timestamp:  match.Timestamp,
+			Captures:   match.Captures,
+			Line:       line,
+			MarksReady: match.Pattern.MarksReady,
+		}
+	}
+}
+
+func (p *Parser) Reset() {
+	p.rules.Reset()
+}
+```
+
+**Step 4: Run test**
+
+```bash
+cd nix/packages/pg-startup-profiler && go test ./internal/logs/... 
-v +``` + +**Step 5: Commit** + +```bash +git add nix/packages/pg-startup-profiler/internal/logs/ +git commit -m "feat(pg-startup-profiler): add log parser" +``` + +--- + +## Task 5: Timeline and Event Aggregation + +**Files:** +- Create: `nix/packages/pg-startup-profiler/internal/report/timeline.go` +- Create: `nix/packages/pg-startup-profiler/internal/report/timeline_test.go` + +**Step 1: Write failing test** + +```go +// nix/packages/pg-startup-profiler/internal/report/timeline_test.go +package report + +import ( + "testing" + "time" +) + +func TestTimeline(t *testing.T) { + tl := NewTimeline() + + start := time.Now() + tl.AddEvent(Event{ + Type: EventTypeDocker, + Name: "container_start", + Timestamp: start, + }) + + tl.AddEvent(Event{ + Type: EventTypeLog, + Name: "final_server_ready", + Timestamp: start.Add(5 * time.Second), + }) + + tl.Finalize() + + if tl.TotalDuration != 5*time.Second { + t.Errorf("expected 5s duration, got %v", tl.TotalDuration) + } + + if len(tl.Events) != 2 { + t.Errorf("expected 2 events, got %d", len(tl.Events)) + } +} +``` + +**Step 2: Run test to verify it fails** + +```bash +cd nix/packages/pg-startup-profiler && go test ./internal/report/... -v +``` + +**Step 3: Implement timeline** + +```go +// nix/packages/pg-startup-profiler/internal/report/timeline.go +package report + +import ( + "sort" + "time" +) + +type EventType string + +const ( + EventTypeDocker EventType = "DOCKER" + EventTypeExec EventType = "EXEC" + EventTypeOpen EventType = "OPEN" + EventTypeLog EventType = "LOG" +) + +type Event struct { + Type EventType + Name string + Timestamp time.Time + Duration time.Duration + Details string + Captures map[string]string + MarksReady bool +} + +type Phase struct { + Name string + Start time.Time + End time.Time + Duration time.Duration + Percent float64 +} + +type Timeline struct { + Events []Event + Phases []Phase + TotalDuration time.Duration + StartTime time.Time + EndTime time.Time + Extensions []ExtensionTiming + InitScripts []ScriptTiming + BGWorkers []WorkerTiming +} + +type ExtensionTiming struct { + Name string + LoadTime time.Duration +} + +type ScriptTiming struct { + Path string + Duration time.Duration +} + +type WorkerTiming struct { + Name string + StartedAt time.Duration +} + +func NewTimeline() *Timeline { + return &Timeline{ + Events: make([]Event, 0), + } +} + +func (t *Timeline) AddEvent(e Event) { + t.Events = append(t.Events, e) +} + +func (t *Timeline) Finalize() { + if len(t.Events) == 0 { + return + } + + // Sort by timestamp + sort.Slice(t.Events, func(i, j int) bool { + return t.Events[i].Timestamp.Before(t.Events[j].Timestamp) + }) + + t.StartTime = t.Events[0].Timestamp + + // Find the ready event + for _, e := range t.Events { + if e.MarksReady { + t.EndTime = e.Timestamp + break + } + } + + if t.EndTime.IsZero() { + t.EndTime = t.Events[len(t.Events)-1].Timestamp + } + + t.TotalDuration = t.EndTime.Sub(t.StartTime) + + // Calculate relative timestamps + for i := range t.Events { + t.Events[i].Duration = t.Events[i].Timestamp.Sub(t.StartTime) + } + + // Extract extension timings + t.extractExtensions() + + // Extract init script timings + t.extractInitScripts() + + // Extract background worker timings + t.extractBGWorkers() + + // Build phases + t.buildPhases() +} + +func (t *Timeline) extractExtensions() { + for _, e := range t.Events { + if e.Name == "extension_load" { + if ext, ok := e.Captures["extension"]; ok { + t.Extensions = append(t.Extensions, ExtensionTiming{ + Name: ext, + LoadTime: e.Duration, + }) + } + } + 
} +} + +func (t *Timeline) extractInitScripts() { + var lastScript string + var lastTime time.Time + + for _, e := range t.Events { + if e.Name == "migration_file" { + if file, ok := e.Captures["file"]; ok { + if lastScript != "" { + t.InitScripts = append(t.InitScripts, ScriptTiming{ + Path: lastScript, + Duration: e.Timestamp.Sub(lastTime), + }) + } + lastScript = file + lastTime = e.Timestamp + } + } + } +} + +func (t *Timeline) extractBGWorkers() { + for _, e := range t.Events { + if e.Name == "bgworker_start" { + if worker, ok := e.Captures["worker"]; ok { + t.BGWorkers = append(t.BGWorkers, WorkerTiming{ + Name: worker, + StartedAt: e.Duration, + }) + } + } + } +} + +func (t *Timeline) buildPhases() { + // Simplified phase detection + // In practice, would use more sophisticated logic based on events + t.Phases = []Phase{ + {Name: "Total", Duration: t.TotalDuration, Percent: 100.0}, + } +} +``` + +**Step 4: Run test** + +```bash +cd nix/packages/pg-startup-profiler && go test ./internal/report/... -v +``` + +**Step 5: Commit** + +```bash +git add nix/packages/pg-startup-profiler/internal/report/ +git commit -m "feat(pg-startup-profiler): add timeline event aggregation" +``` + +--- + +## Task 6: CLI Table Output + +**Files:** +- Create: `nix/packages/pg-startup-profiler/internal/report/table.go` + +**Step 1: Implement table output** + +```go +// nix/packages/pg-startup-profiler/internal/report/table.go +package report + +import ( + "fmt" + "io" + "sort" + "strings" + "time" +) + +func PrintTable(w io.Writer, imageName string, tl *Timeline) { + fmt.Fprintln(w, strings.Repeat("=", 80)) + fmt.Fprintln(w, "PostgreSQL Container Startup Profile") + fmt.Fprintln(w, strings.Repeat("=", 80)) + fmt.Fprintln(w) + fmt.Fprintf(w, "Image: %s\n", imageName) + fmt.Fprintf(w, "Total: %s\n", formatDuration(tl.TotalDuration)) + fmt.Fprintln(w) + + // Phases + fmt.Fprintln(w, "PHASES") + fmt.Fprintln(w, strings.Repeat("-", 80)) + fmt.Fprintf(w, " %-30s %-12s %-8s\n", "Phase", "Duration", "Pct") + fmt.Fprintln(w, " "+strings.Repeat("-", 50)) + for _, p := range tl.Phases { + fmt.Fprintf(w, " %-30s %-12s %5.1f%%\n", p.Name, formatDuration(p.Duration), p.Percent) + } + fmt.Fprintln(w) + + // Init scripts (top 5) + if len(tl.InitScripts) > 0 { + fmt.Fprintln(w, "INIT SCRIPTS (top 5 by duration)") + fmt.Fprintln(w, strings.Repeat("-", 80)) + + // Sort by duration + sorted := make([]ScriptTiming, len(tl.InitScripts)) + copy(sorted, tl.InitScripts) + sort.Slice(sorted, func(i, j int) bool { + return sorted[i].Duration > sorted[j].Duration + }) + + limit := 5 + if len(sorted) < limit { + limit = len(sorted) + } + + fmt.Fprintf(w, " %-50s %s\n", "Script", "Duration") + fmt.Fprintln(w, " "+strings.Repeat("-", 60)) + for _, s := range sorted[:limit] { + // Truncate path for display + path := s.Path + if len(path) > 48 { + path = "..." 
+ path[len(path)-45:]
+			}
+			fmt.Fprintf(w, "  %-50s %s\n", path, formatDuration(s.Duration))
+		}
+		fmt.Fprintln(w)
+	}
+
+	// Extensions
+	if len(tl.Extensions) > 0 {
+		fmt.Fprintln(w, "EXTENSIONS")
+		fmt.Fprintln(w, strings.Repeat("-", 80))
+		fmt.Fprintf(w, "  %-20s %s\n", "Extension", "Loaded at")
+		fmt.Fprintln(w, "  "+strings.Repeat("-", 30))
+		for _, e := range tl.Extensions {
+			fmt.Fprintf(w, "  %-20s %s\n", e.Name, formatDuration(e.LoadTime))
+		}
+		fmt.Fprintln(w)
+	}
+
+	// Background workers
+	if len(tl.BGWorkers) > 0 {
+		fmt.Fprintln(w, "BACKGROUND WORKERS")
+		fmt.Fprintln(w, strings.Repeat("-", 80))
+		fmt.Fprintf(w, "  %-20s %s\n", "Worker", "Started at")
+		fmt.Fprintln(w, "  "+strings.Repeat("-", 30))
+		// Named bw so it does not shadow the io.Writer parameter w
+		for _, bw := range tl.BGWorkers {
+			fmt.Fprintf(w, "  %-20s %s\n", bw.Name, formatDuration(bw.StartedAt))
+		}
+		fmt.Fprintln(w)
+	}
+
+	// Event timeline (verbose)
+	if len(tl.Events) > 0 {
+		fmt.Fprintln(w, "EVENT TIMELINE")
+		fmt.Fprintln(w, strings.Repeat("-", 80))
+		for _, e := range tl.Events {
+			fmt.Fprintf(w, "  [%s] %-8s %s\n",
+				formatDuration(e.Duration),
+				e.Type,
+				truncate(e.Name, 60))
+		}
+	}
+}
+
+func formatDuration(d time.Duration) string {
+	if d < time.Millisecond {
+		return fmt.Sprintf("%.3fms", float64(d.Microseconds())/1000)
+	}
+	if d < time.Second {
+		return fmt.Sprintf("%.0fms", float64(d.Milliseconds()))
+	}
+	return fmt.Sprintf("%.3fs", d.Seconds())
+}
+
+func truncate(s string, maxLen int) string {
+	if len(s) <= maxLen {
+		return s
+	}
+	return s[:maxLen-3] + "..."
+}
+```
+
+**Step 2: Commit**
+
+```bash
+git add nix/packages/pg-startup-profiler/internal/report/table.go
+git commit -m "feat(pg-startup-profiler): add CLI table output"
+```
+
+---
+
+## Task 7: JSON Output
+
+**Files:**
+- Create: `nix/packages/pg-startup-profiler/internal/report/json.go`
+
+**Step 1: Implement JSON output**
+
+```go
+// nix/packages/pg-startup-profiler/internal/report/json.go
+package report
+
+import (
+	"encoding/json"
+	"io"
+)
+
+type JSONReport struct {
+	Image           string          `json:"image"`
+	TotalDurationMs int64           `json:"total_duration_ms"`
+	Phases          []JSONPhase     `json:"phases"`
+	InitScripts     []JSONScript    `json:"init_scripts"`
+	Extensions      []JSONExtension `json:"extensions"`
+	BGWorkers       []JSONWorker    `json:"background_workers"`
+	Events          []JSONEvent     `json:"events,omitempty"`
+}
+
+type JSONPhase struct {
+	Name       string  `json:"name"`
+	DurationMs int64   `json:"duration_ms"`
+	Percent    float64 `json:"pct"`
+}
+
+type JSONScript struct {
+	Path       string `json:"path"`
+	DurationMs int64  `json:"duration_ms"`
+}
+
+type JSONExtension struct {
+	Name       string `json:"name"`
+	LoadTimeMs int64  `json:"load_time_ms"`
+}
+
+type JSONWorker struct {
+	Name        string `json:"name"`
+	StartedAtMs int64  `json:"started_at_ms"`
+}
+
+type JSONEvent struct {
+	Type     string            `json:"type"`
+	Name     string            `json:"name"`
+	OffsetMs int64             `json:"offset_ms"`
+	Captures map[string]string `json:"captures,omitempty"`
+}
+
+func PrintJSON(w io.Writer, imageName string, tl *Timeline, verbose bool) error {
+	report := JSONReport{
+		Image:           imageName,
+		TotalDurationMs: tl.TotalDuration.Milliseconds(),
+	}
+
+	for _, p := range tl.Phases {
+		report.Phases = append(report.Phases, JSONPhase{
+			Name:       p.Name,
+			DurationMs: p.Duration.Milliseconds(),
+			Percent:    p.Percent,
+		})
+	}
+
+	for _, s := range tl.InitScripts {
+		report.InitScripts = append(report.InitScripts, JSONScript{
+			Path:       s.Path,
+			DurationMs: s.Duration.Milliseconds(),
+		})
+	}
+
+	for _, e := range tl.Extensions {
+		report.Extensions = append(report.Extensions, 
JSONExtension{ + Name: e.Name, + LoadTimeMs: e.LoadTime.Milliseconds(), + }) + } + + for _, w := range tl.BGWorkers { + report.BGWorkers = append(report.BGWorkers, JSONWorker{ + Name: w.Name, + StartedAtMs: w.StartedAt.Milliseconds(), + }) + } + + if verbose { + for _, e := range tl.Events { + report.Events = append(report.Events, JSONEvent{ + Type: string(e.Type), + Name: e.Name, + OffsetMs: e.Duration.Milliseconds(), + Captures: e.Captures, + }) + } + } + + encoder := json.NewEncoder(w) + encoder.SetIndent("", " ") + return encoder.Encode(report) +} +``` + +**Step 2: Commit** + +```bash +git add nix/packages/pg-startup-profiler/internal/report/json.go +git commit -m "feat(pg-startup-profiler): add JSON output" +``` + +--- + +## Task 8: eBPF Tracer (Stub for Linux) + +**Files:** +- Create: `nix/packages/pg-startup-profiler/internal/ebpf/tracer.go` +- Create: `nix/packages/pg-startup-profiler/internal/ebpf/tracer_stub.go` + +**Step 1: Create tracer interface and stub** + +```go +// nix/packages/pg-startup-profiler/internal/ebpf/tracer.go +//go:build linux + +package ebpf + +import ( + "context" + "time" +) + +type ExecEvent struct { + Timestamp time.Time + Comm string + Args string + PID uint32 +} + +type OpenEvent struct { + Timestamp time.Time + Path string + PID uint32 +} + +type Tracer struct { + cgroupID uint64 + execChan chan ExecEvent + openChan chan OpenEvent +} + +func NewTracer(cgroupID uint64) (*Tracer, error) { + return &Tracer{ + cgroupID: cgroupID, + execChan: make(chan ExecEvent, 1000), + openChan: make(chan OpenEvent, 1000), + }, nil +} + +func (t *Tracer) Start(ctx context.Context) error { + // TODO: Implement actual eBPF probe attachment + // This requires: + // 1. Load eBPF program from embedded bytecode + // 2. Attach to tracepoints + // 3. Set up perf buffer for events + // 4. 
Filter by cgroup ID
+	return nil
+}
+
+func (t *Tracer) Stop() error {
+	close(t.execChan)
+	close(t.openChan)
+	return nil
+}
+
+func (t *Tracer) ExecEvents() <-chan ExecEvent {
+	return t.execChan
+}
+
+func (t *Tracer) OpenEvents() <-chan OpenEvent {
+	return t.openChan
+}
+```
+
+```go
+// nix/packages/pg-startup-profiler/internal/ebpf/tracer_stub.go
+//go:build !linux
+
+package ebpf
+
+import (
+	"context"
+	"fmt"
+	"time"
+)
+
+type ExecEvent struct {
+	Timestamp time.Time
+	Comm      string
+	Args      string
+	PID       uint32
+}
+
+type OpenEvent struct {
+	Timestamp time.Time
+	Path      string
+	PID       uint32
+}
+
+type Tracer struct {
+	execChan chan ExecEvent
+	openChan chan OpenEvent
+}
+
+func NewTracer(cgroupID uint64) (*Tracer, error) {
+	return nil, fmt.Errorf("eBPF tracing is only supported on Linux")
+}
+
+func (t *Tracer) Start(ctx context.Context) error {
+	return fmt.Errorf("eBPF tracing is only supported on Linux")
+}
+
+func (t *Tracer) Stop() error {
+	return nil
+}
+
+func (t *Tracer) ExecEvents() <-chan ExecEvent {
+	return nil
+}
+
+func (t *Tracer) OpenEvents() <-chan OpenEvent {
+	return nil
+}
+```
+
+**Step 2: Commit**
+
+```bash
+git add nix/packages/pg-startup-profiler/internal/ebpf/
+git commit -m "feat(pg-startup-profiler): add eBPF tracer interface and stub"
+```
+
+---
+
+## Task 9: Profile Command
+
+**Files:**
+- Create: `nix/packages/pg-startup-profiler/cmd/pg-startup-profiler/profile.go`
+
+**Step 1: Implement profile command**
+
+```go
+// nix/packages/pg-startup-profiler/cmd/pg-startup-profiler/profile.go
+package main
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"os/signal"
+	"syscall"
+	"time"
+
+	"github.com/spf13/cobra"
+	"github.com/supabase/pg-startup-profiler/internal/docker"
+	"github.com/supabase/pg-startup-profiler/internal/logger"
+	"github.com/supabase/pg-startup-profiler/internal/logs"
+	"github.com/supabase/pg-startup-profiler/internal/report"
+	"github.com/supabase/pg-startup-profiler/internal/rules"
+
+	_ "embed"
+)
+
+// NOTE: go:embed patterns may not contain "..", so the default rules file
+// has to live inside this package directory (copy rules/default.yaml here,
+// or embed it from internal/rules as the project structure suggests).
+//go:embed default.yaml
+var defaultRulesYAML []byte
+
+var (
+	flagImage      string
+	flagDockerfile string
+	flagJSON       bool
+	flagVerbose    bool
+	flagRulesFile  string
+	flagTimeout    time.Duration
+)
+
+func init() {
+	profileCmd.Flags().StringVar(&flagImage, "image", "", "Docker image to profile")
+	profileCmd.Flags().StringVar(&flagDockerfile, "dockerfile", "", "Dockerfile to build and profile")
+	profileCmd.Flags().BoolVar(&flagJSON, "json", false, "Output as JSON")
+	profileCmd.Flags().BoolVar(&flagVerbose, "verbose", false, "Include full event timeline")
+	profileCmd.Flags().StringVar(&flagRulesFile, "rules", "", "Custom rules YAML file")
+	profileCmd.Flags().DurationVar(&flagTimeout, "timeout", 5*time.Minute, "Timeout for container startup")
+
+	rootCmd.AddCommand(profileCmd)
+}
+
+var profileCmd = &cobra.Command{
+	Use:   "profile",
+	Short: "Profile container startup time",
+	Long:  "Profile a PostgreSQL container's startup time and show breakdown",
+	RunE:  runProfile,
+}
+
+func runProfile(cmd *cobra.Command, args []string) error {
+	log := logger.Setup(flagVerbose, false)
+
+	if flagImage == "" && flagDockerfile == "" {
+		return fmt.Errorf("either --image or --dockerfile is required")
+	}
+
+	// Load rules
+	var rulesData []byte
+	if flagRulesFile != "" {
+		data, err := os.ReadFile(flagRulesFile)
+		if err != nil {
+			return fmt.Errorf("failed to read rules file: %w", err)
+		}
+		rulesData = data
+	} else {
+		rulesData = defaultRulesYAML
+	}
+
+	r, err := rules.LoadFromYAML(rulesData)
+	if err != nil {
+		return fmt.Errorf("failed 
to load rules: %w", err) + } + + // Create Docker client + dockerClient, err := docker.NewClient() + if err != nil { + return fmt.Errorf("failed to create docker client: %w", err) + } + defer dockerClient.Close() + + ctx, cancel := context.WithTimeout(context.Background(), flagTimeout) + defer cancel() + + // Handle signals + sigCh := make(chan os.Signal, 1) + signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM) + go func() { + <-sigCh + cancel() + }() + + imageName := flagImage + if flagDockerfile != "" { + // Build image + log.Info("Building image from Dockerfile", "dockerfile", flagDockerfile) + imageName = fmt.Sprintf("pg-startup-profiler-test:%d", time.Now().Unix()) + // For now, shell out to docker build + return fmt.Errorf("--dockerfile not yet implemented, please build image first and use --image") + } + + // Check image exists + exists, err := dockerClient.ImageExists(ctx, imageName) + if err != nil { + return fmt.Errorf("failed to check image: %w", err) + } + if !exists { + return fmt.Errorf("image not found: %s", imageName) + } + + log.Info("Profiling container startup", "image", imageName) + + // Create timeline + timeline := report.NewTimeline() + parser := logs.NewParser(r) + + // Create container + env := []string{"POSTGRES_PASSWORD=postgres"} + container, err := dockerClient.CreateContainer(ctx, imageName, env) + if err != nil { + return fmt.Errorf("failed to create container: %w", err) + } + defer func() { + dockerClient.StopContainer(context.Background(), container.ID) + dockerClient.RemoveContainer(context.Background(), container.ID) + }() + + // Start log streaming + logEvents := make(chan logs.Event, 100) + logDone := make(chan error, 1) + go func() { + err := dockerClient.StreamLogs(ctx, container.ID, func(line string, ts time.Time) { + parser.ParseLine(line, logEvents) + }) + logDone <- err + }() + + // Start container and record time + startTime, err := dockerClient.StartContainer(ctx, container.ID) + if err != nil { + return fmt.Errorf("failed to start container: %w", err) + } + + timeline.AddEvent(report.Event{ + Type: report.EventTypeDocker, + Name: "container_start", + Timestamp: startTime, + }) + + // Wait for ready or timeout + ready := false + for !ready { + select { + case event := <-logEvents: + timeline.AddEvent(report.Event{ + Type: report.EventTypeLog, + Name: event.Name, + Timestamp: event.Timestamp, + Captures: event.Captures, + MarksReady: event.MarksReady, + }) + if event.MarksReady { + ready = true + } + case <-ctx.Done(): + return fmt.Errorf("timeout waiting for container to be ready") + case err := <-logDone: + if err != nil && !ready { + return fmt.Errorf("log streaming error: %w", err) + } + } + } + + // Finalize timeline + timeline.Finalize() + + // Output results + if flagJSON { + return report.PrintJSON(os.Stdout, imageName, timeline, flagVerbose) + } + report.PrintTable(os.Stdout, imageName, timeline) + return nil +} +``` + +**Step 2: Update main.go imports** + +Ensure go.mod is updated and run: + +```bash +cd nix/packages/pg-startup-profiler && go mod tidy +``` + +**Step 3: Commit** + +```bash +git add nix/packages/pg-startup-profiler/ +git commit -m "feat(pg-startup-profiler): add profile command" +``` + +--- + +## Task 10: Nix Package Integration + +**Files:** +- Create: `nix/packages/pg-startup-profiler.nix` +- Modify: `nix/packages/default.nix` +- Modify: `nix/apps.nix` + +**Step 1: Create Nix package** + +```nix +# nix/packages/pg-startup-profiler.nix +{ pkgs, lib, ... 
}: +let + pg-startup-profiler = pkgs.buildGoModule { + pname = "pg-startup-profiler"; + version = "0.1.0"; + + src = ./pg-startup-profiler; + + vendorHash = null; # Will be updated after first build attempt + + subPackages = [ "cmd/pg-startup-profiler" ]; + + # Disable CGO for simpler builds (eBPF stub for non-Linux) + env.CGO_ENABLED = "0"; + + ldflags = [ + "-s" + "-w" + "-X main.version=0.1.0" + ]; + + doCheck = true; + + meta = with lib; { + description = "PostgreSQL container startup profiler"; + license = licenses.asl20; + platforms = platforms.linux ++ platforms.darwin; + }; + }; +in +{ + inherit pg-startup-profiler; +} +``` + +**Step 2: Add to default.nix** + +Add after line 22 (after supascan-pkgs): + +```nix +pg-startup-profiler-pkgs = pkgs.callPackage ./pg-startup-profiler.nix { + inherit (pkgs) lib; +}; +``` + +Add to packages (after supascan): + +```nix +inherit (pg-startup-profiler-pkgs) pg-startup-profiler; +``` + +**Step 3: Add to apps.nix** + +Add to the apps attribute set: + +```nix +pg-startup-profiler = mkApp "pg-startup-profiler"; +``` + +**Step 4: Build and get vendor hash** + +```bash +nix build .#pg-startup-profiler 2>&1 | grep "got:" +``` + +Update vendorHash in pg-startup-profiler.nix with the actual hash. + +**Step 5: Commit** + +```bash +git add nix/packages/pg-startup-profiler.nix nix/packages/default.nix nix/apps.nix +git commit -m "feat(pg-startup-profiler): add Nix packaging" +``` + +--- + +## Task 11: Integration Test + +**Files:** +- Create: `nix/packages/pg-startup-profiler/cmd/pg-startup-profiler/profile_test.go` + +**Step 1: Write integration test** + +```go +// nix/packages/pg-startup-profiler/cmd/pg-startup-profiler/profile_test.go +//go:build integration + +package main + +import ( + "os/exec" + "strings" + "testing" +) + +func TestProfileIntegration(t *testing.T) { + // Skip if docker is not available + if _, err := exec.LookPath("docker"); err != nil { + t.Skip("docker not available") + } + + // This test requires a pre-built image + // In CI, this would be built first + cmd := exec.Command("go", "run", ".", "profile", "--image", "postgres:16-alpine", "--timeout", "2m") + output, err := cmd.CombinedOutput() + + if err != nil { + t.Fatalf("profile command failed: %v\nOutput: %s", err, output) + } + + // Check output contains expected sections + outputStr := string(output) + if !strings.Contains(outputStr, "PostgreSQL Container Startup Profile") { + t.Error("output missing header") + } + if !strings.Contains(outputStr, "Total:") { + t.Error("output missing total duration") + } +} +``` + +**Step 2: Commit** + +```bash +git add nix/packages/pg-startup-profiler/cmd/pg-startup-profiler/profile_test.go +git commit -m "test(pg-startup-profiler): add integration test" +``` + +--- + +## Summary + +Tasks 1-11 implement the core `pg-startup-profiler` tool with: +- Pluggable YAML rules for log pattern matching +- Docker client for container lifecycle +- Log parsing with PostgreSQL timestamp extraction +- Timeline event aggregation +- CLI table and JSON output +- eBPF tracer stub (full implementation is Task 12+) +- Nix packaging following repo patterns + +The eBPF tracing (Task 12+) can be implemented later to add syscall-level visibility. The tool is functional without it using log parsing alone. 
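Once Tasks 1-11 are in place, a quick end-to-end sanity check is to drive the tool through the flake app (a sketch: the `profile` subcommand and its flags are the ones defined in Task 9, the `nix run` entry point assumes the `apps.nix` wiring from Task 10, and the image tag and output filename are only examples):

```bash
# Human-readable startup breakdown for a locally available image
nix run .#pg-startup-profiler -- profile --image postgres:16-alpine --timeout 2m

# Machine-readable output, handy for diffing runs between branches
nix run .#pg-startup-profiler -- profile --image postgres:16-alpine --json > profile.json
```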
diff --git a/nix/apps.nix b/nix/apps.nix index d3776d09d..43817dba3 100644 --- a/nix/apps.nix +++ b/nix/apps.nix @@ -21,6 +21,7 @@ pg-restore = mkApp "pg-restore"; local-infra-bootstrap = mkApp "local-infra-bootstrap"; dbmate-tool = mkApp "dbmate-tool"; + image-size-analyzer = mkApp "image-size-analyzer"; update-readme = mkApp "update-readme"; show-commands = mkApp "show-commands"; build-test-ami = mkApp "build-test-ami"; @@ -28,6 +29,9 @@ cleanup-ami = mkApp "cleanup-ami"; trigger-nix-build = mkApp "trigger-nix-build"; supascan = mkApp "supascan"; + pg-startup-profiler = mkApp "pg-startup-profiler"; + docker-image-test = mkApp "docker-image-test"; + cli-smoke-test = mkApp "cli-smoke-test"; }; }; } diff --git a/nix/checks.nix b/nix/checks.nix index 4af756f1d..73fe0f887 100644 --- a/nix/checks.nix +++ b/nix/checks.nix @@ -27,6 +27,10 @@ # deadnix: skip makeCheckHarness = pgpkg: + # legacyPkgName: the name used in legacyPackages (e.g., "psql_17" or "psql_17_slim") + { + legacyPkgName ? null, + }: let pg_prove = pkgs.perlPackages.TAPParserSourceHandlerpgTAP; inherit (self'.packages) pg_regress; @@ -86,11 +90,23 @@ in builtins.trace "Major version result: ${result}" result; + # Determine the legacy package name for selecting extensions + effectiveLegacyPkgName = if legacyPkgName != null then legacyPkgName else "psql_${majorVersion}"; + # Select the appropriate pgroonga package for this PostgreSQL version - pgroonga = self'.legacyPackages."psql_${majorVersion}".exts.pgroonga; + pgroonga = self'.legacyPackages.${effectiveLegacyPkgName}.exts.pgroonga; + # Use different ports to allow parallel test runs + # slim packages get their own ports to avoid conflicts + isSlim = lib.hasSuffix "_slim" effectiveLegacyPkgName; pgPort = - if (majorVersion == "17") then + if (majorVersion == "17" && isSlim) then + "5538" + else if (majorVersion == "15" && isSlim) then + "5539" + else if (majorVersion == "orioledb-17" && isSlim) then + "5540" + else if (majorVersion == "17") then "5535" else if (majorVersion == "15") then "5536" @@ -351,6 +367,8 @@ CREATE TABLE IF NOT EXISTS test_config (key TEXT PRIMARY KEY, value TEXT); INSERT INTO test_config (key, value) VALUES ('http_mock_port', '$HTTP_MOCK_PORT') ON CONFLICT (key) DO UPDATE SET value = EXCLUDED.value; + INSERT INTO test_config (key, value) VALUES ('http_mock_host', 'localhost') + ON CONFLICT (key) DO UPDATE SET value = EXCLUDED.value; " SORTED_DIR=$(mktemp -d) for t in $(printf "%s\n" ${builtins.concatStringsSep " " sortedTestList}); do @@ -395,6 +413,8 @@ CREATE TABLE IF NOT EXISTS test_config (key TEXT PRIMARY KEY, value TEXT); INSERT INTO test_config (key, value) VALUES ('http_mock_port', '$HTTP_MOCK_PORT') ON CONFLICT (key) DO UPDATE SET value = EXCLUDED.value; + INSERT INTO test_config (key, value) VALUES ('http_mock_host', 'localhost') + ON CONFLICT (key) DO UPDATE SET value = EXCLUDED.value; " #shellcheck disable=SC2154 @@ -423,19 +443,40 @@ in { psql_15 = pkgs.runCommand "run-check-harness-psql-15" { } ( - lib.getExe (makeCheckHarness self'.packages."psql_15/bin") + lib.getExe (makeCheckHarness self'.packages."psql_15/bin" { legacyPkgName = "psql_15"; }) ); psql_17 = pkgs.runCommand "run-check-harness-psql-17" { } ( - lib.getExe (makeCheckHarness self'.packages."psql_17/bin") + lib.getExe (makeCheckHarness self'.packages."psql_17/bin" { legacyPkgName = "psql_17"; }) ); psql_orioledb-17 = pkgs.runCommand "run-check-harness-psql-orioledb-17" { } ( - lib.getExe (makeCheckHarness self'.packages."psql_orioledb-17/bin") + lib.getExe ( + makeCheckHarness 
self'.packages."psql_orioledb-17/bin" { legacyPkgName = "psql_orioledb-17"; }
+      )
+    );
+    psql_15_slim = pkgs.runCommand "run-check-harness-psql-15-slim" { } (
+      lib.getExe (makeCheckHarness self'.packages."psql_15_slim/bin" { legacyPkgName = "psql_15_slim"; })
+    );
+    psql_17_slim = pkgs.runCommand "run-check-harness-psql-17-slim" { } (
+      lib.getExe (makeCheckHarness self'.packages."psql_17_slim/bin" { legacyPkgName = "psql_17_slim"; })
+    );
+    psql_orioledb-17_slim = pkgs.runCommand "run-check-harness-psql-orioledb-17-slim" { } (
+      lib.getExe (
+        makeCheckHarness self'.packages."psql_orioledb-17_slim/bin" {
+          legacyPkgName = "psql_orioledb-17_slim";
+        }
+      )
+    );
     inherit (self'.packages)
-      wal-g-2
-      pg_regress
+      cli-smoke-test
+      docker-image-inputs
+      docker-image-test
       goss
+      image-size-analyzer
+      pg_regress
+      pg-startup-profiler
+      supabase-cli
       supascan
+      wal-g-2
       ;
     devShell = self'.devShells.default;
   }
diff --git a/nix/docs/image-size-analyzer-usage.md b/nix/docs/image-size-analyzer-usage.md
new file mode 100644
index 000000000..dc5845b06
--- /dev/null
+++ b/nix/docs/image-size-analyzer-usage.md
@@ -0,0 +1,269 @@
+# Image Size Analyzer - Usage Guide
+
+A tool to analyze Docker image sizes for Supabase Postgres images, providing breakdowns by layers, directories, Nix packages, and APT packages.
+
+## Local Usage
+
+### Prerequisites
+
+- Nix with flakes enabled
+- Docker daemon running
+
+### Basic Commands
+
+```bash
+# Analyze all images (Dockerfile-15, Dockerfile-17, Dockerfile-orioledb-17)
+# This will build all images first, then analyze them
+nix run .#image-size-analyzer
+
+# Analyze a specific image
+nix run .#image-size-analyzer -- --image Dockerfile-17
+
+# Analyze multiple specific images
+nix run .#image-size-analyzer -- --image Dockerfile-15 --image Dockerfile-17
+
+# Skip building (analyze existing images)
+# Images must already exist with the -analyze tag suffix
+nix run .#image-size-analyzer -- --no-build
+
+# Output as JSON instead of TUI
+nix run .#image-size-analyzer -- --json
+
+# Combine flags
+nix run .#image-size-analyzer -- --image Dockerfile-17 --json --no-build
+```
+
+### Understanding the Output
+
+The TUI output shows five sections per image:
+
+1. **Total Size** - Overall image size
+2. **Layers** - Top 10 Docker layers by size, showing which Dockerfile instructions add the most
+3. **Directories** - Top 10 directories by size inside the image
+4. **Nix Packages** - Top 15 Nix store packages by size (e.g., postgresql, postgis, extensions)
+5. **APT Packages** - Top 15 Debian packages by size
+
+### Example Workflow
+
+```bash
+# 1. Make changes to reduce image size (e.g., remove an extension)
+
+# 2. Build and analyze the specific image you changed
+nix run .#image-size-analyzer -- --image Dockerfile-17
+
+# 3. Compare with JSON output for precise numbers
+nix run .#image-size-analyzer -- --image Dockerfile-17 --json > before.json
+
+# 4. 
Make more changes, then compare +nix run .#image-size-analyzer -- --image Dockerfile-17 --json > after.json +diff before.json after.json +``` + +--- + +## CI Usage + +### GitHub Actions Example + +```yaml +name: Image Size Analysis + +on: + pull_request: + paths: + - 'docker/**' + - 'nix/**' + workflow_dispatch: + +jobs: + analyze-image-size: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Install Nix + uses: cachix/install-nix-action@v27 + with: + extra_nix_config: | + extra-substituters = https://nix-postgres-artifacts.s3.amazonaws.com + extra-trusted-public-keys = nix-postgres-artifacts:dGZlQOvKcNEjvT7QEAJbcV6b6uk7VF/hWMjhYleiaLI= + + - name: Analyze image sizes + run: | + nix run .#image-size-analyzer -- --json > image-sizes.json + + - name: Upload size report + uses: actions/upload-artifact@v4 + with: + name: image-size-report + path: image-sizes.json + + - name: Comment PR with sizes + if: github.event_name == 'pull_request' + uses: actions/github-script@v7 + with: + script: | + const fs = require('fs'); + const report = JSON.parse(fs.readFileSync('image-sizes.json', 'utf8')); + + let comment = '## Docker Image Size Report\n\n'; + for (const image of report.images) { + const sizeGB = (image.total_size_bytes / 1073741824).toFixed(2); + comment += `### ${image.dockerfile}: ${sizeGB} GB\n\n`; + + comment += '**Top 5 Nix Packages:**\n'; + for (const pkg of image.nix_packages.slice(0, 5)) { + const sizeMB = (pkg.size_bytes / 1048576).toFixed(1); + comment += `- ${pkg.name}: ${sizeMB} MB\n`; + } + comment += '\n'; + } + + github.rest.issues.createComment({ + issue_number: context.issue.number, + owner: context.repo.owner, + repo: context.repo.repo, + body: comment + }); +``` + +### Size Threshold Check + +Add a job that fails if images exceed a size threshold: + +```yaml + check-size-threshold: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Install Nix + uses: cachix/install-nix-action@v27 + with: + extra_nix_config: | + extra-substituters = https://nix-postgres-artifacts.s3.amazonaws.com + extra-trusted-public-keys = nix-postgres-artifacts:dGZlQOvKcNEjvT7QEAJbcV6b6uk7VF/hWMjhYleiaLI= + + - name: Check image sizes + run: | + nix run .#image-size-analyzer -- --json > sizes.json + + # Set threshold (2.5 GB in bytes) + THRESHOLD=2684354560 + + # Check each image + for dockerfile in Dockerfile-15 Dockerfile-17 Dockerfile-orioledb-17; do + size=$(jq -r ".images[] | select(.dockerfile == \"$dockerfile\") | .total_size_bytes" sizes.json) + if [ "$size" -gt "$THRESHOLD" ]; then + echo "ERROR: $dockerfile exceeds size threshold" + echo " Size: $((size / 1048576)) MB" + echo " Threshold: $((THRESHOLD / 1048576)) MB" + exit 1 + fi + echo "OK: $dockerfile = $((size / 1048576)) MB" + done +``` + +### Size Regression Check + +Compare against a baseline to catch size regressions: + +```yaml + check-size-regression: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 # Need full history for base branch + + - name: Install Nix + uses: cachix/install-nix-action@v27 + with: + extra_nix_config: | + extra-substituters = https://nix-postgres-artifacts.s3.amazonaws.com + extra-trusted-public-keys = nix-postgres-artifacts:dGZlQOvKcNEjvT7QEAJbcV6b6uk7VF/hWMjhYleiaLI= + + - name: Analyze PR branch + run: | + nix run .#image-size-analyzer -- --image Dockerfile-17 --json > pr-sizes.json + + - name: Analyze base branch + run: | + git checkout ${{ github.base_ref }} + nix run .#image-size-analyzer -- --image Dockerfile-17 
--json > base-sizes.json + git checkout - + + - name: Compare sizes + run: | + PR_SIZE=$(jq -r '.images[0].total_size_bytes' pr-sizes.json) + BASE_SIZE=$(jq -r '.images[0].total_size_bytes' base-sizes.json) + + DIFF=$((PR_SIZE - BASE_SIZE)) + DIFF_MB=$((DIFF / 1048576)) + + # Allow up to 50MB increase + MAX_INCREASE=52428800 + + if [ "$DIFF" -gt "$MAX_INCREASE" ]; then + echo "ERROR: Image size increased by ${DIFF_MB}MB (max allowed: 50MB)" + echo "Base: $((BASE_SIZE / 1048576))MB" + echo "PR: $((PR_SIZE / 1048576))MB" + exit 1 + fi + + echo "Size change: ${DIFF_MB}MB" +``` + +--- + +## JSON Output Schema + +```json +{ + "images": [ + { + "dockerfile": "Dockerfile-17", + "total_size_bytes": 1954000000, + "layers": [ + { + "index": 0, + "size_bytes": 890000000, + "command": "COPY /nix/store /nix/store" + } + ], + "directories": [ + { + "path": "/nix/store", + "size_bytes": 1200000000 + } + ], + "nix_packages": [ + { + "name": "postgresql-17.6", + "size_bytes": 152000000 + } + ], + "apt_packages": [ + { + "name": "libc6", + "size_bytes": 12500000 + } + ] + } + ] +} +``` + +--- + +## Tips + +1. **Use `--no-build` for iteration** - Once you've built an image, use `--no-build` to quickly re-analyze without rebuilding. + +2. **Focus on Nix packages** - Most of the image size comes from `/nix/store/`. The Nix packages breakdown helps identify which extensions or dependencies are largest. + +3. **Check layers for optimization opportunities** - If a layer is unexpectedly large, investigate the corresponding Dockerfile instruction. + +4. **Use JSON for automation** - The JSON output is stable and can be parsed with `jq` for scripting and CI integration. + +5. **Compare before/after** - Always capture baseline sizes before making changes so you can measure the impact. diff --git a/nix/ext/hypopg.nix b/nix/ext/hypopg.nix index 92784a8de..b3e880a1b 100644 --- a/nix/ext/hypopg.nix +++ b/nix/ext/hypopg.nix @@ -4,6 +4,7 @@ buildEnv, fetchFromGitHub, postgresql, + latestOnly ? false, }: let @@ -14,7 +15,13 @@ let ) allVersions; versions = lib.naturalSort (lib.attrNames supportedVersions); latestVersion = lib.last versions; - numberOfVersions = builtins.length versions; + versionsToUse = + if latestOnly then + { "${latestVersion}" = supportedVersions.${latestVersion}; } + else + supportedVersions; + versionsBuilt = if latestOnly then [ latestVersion ] else versions; + numberOfVersionsBuilt = builtins.length versionsBuilt; build = version: hash: stdenv.mkDerivation rec { @@ -66,9 +73,7 @@ let inherit (postgresql.meta) platforms; }; }; - packages = builtins.attrValues ( - lib.mapAttrs (name: value: build name value.hash) supportedVersions - ); + packages = builtins.attrValues (lib.mapAttrs (name: value: build name value.hash) versionsToUse); in buildEnv { name = pname; @@ -81,14 +86,19 @@ buildEnv { # checks (set -x test "$(ls -A $out/lib/${pname}*${postgresql.dlSuffix} | wc -l)" = "${ - toString (numberOfVersions + 1) + toString (numberOfVersionsBuilt + 1) }" ) ''; passthru = { - inherit versions numberOfVersions pname; + versions = versionsBuilt; + numberOfVersions = numberOfVersionsBuilt; + inherit pname latestOnly; version = - "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." ] [ "-" ] v) versions); + if latestOnly then + latestVersion + else + "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." 
] [ "-" ] v) versions); }; } diff --git a/nix/ext/index_advisor.nix b/nix/ext/index_advisor.nix index 85a23bfd0..3130d4bcc 100644 --- a/nix/ext/index_advisor.nix +++ b/nix/ext/index_advisor.nix @@ -5,6 +5,7 @@ fetchFromGitHub, postgresql, callPackage, + latestOnly ? false, }: let @@ -15,7 +16,13 @@ let ) allVersions; versions = lib.naturalSort (lib.attrNames supportedVersions); latestVersion = lib.last versions; - numberOfVersions = builtins.length versions; + versionsToUse = + if latestOnly then + { "${latestVersion}" = supportedVersions.${latestVersion}; } + else + supportedVersions; + versionsBuilt = if latestOnly then [ latestVersion ] else versions; + numberOfVersionsBuilt = builtins.length versionsBuilt; build = version: hash: stdenv.mkDerivation rec { @@ -64,9 +71,7 @@ let inherit (postgresql.meta) platforms; }; }; - packages = builtins.attrValues ( - lib.mapAttrs (name: value: build name value.hash) supportedVersions - ); + packages = builtins.attrValues (lib.mapAttrs (name: value: build name value.hash) versionsToUse); in pkgs.buildEnv { name = pname; @@ -78,8 +83,13 @@ pkgs.buildEnv { ]; passthru = { - inherit versions numberOfVersions pname; + versions = versionsBuilt; + numberOfVersions = numberOfVersionsBuilt; + inherit pname latestOnly; version = - "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." ] [ "-" ] v) versions); + if latestOnly then + latestVersion + else + "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." ] [ "-" ] v) versions); }; } diff --git a/nix/ext/pg-safeupdate.nix b/nix/ext/pg-safeupdate.nix index ee31f4371..97921c9c6 100644 --- a/nix/ext/pg-safeupdate.nix +++ b/nix/ext/pg-safeupdate.nix @@ -5,6 +5,7 @@ fetchFromGitHub, postgresql, makeWrapper, + latestOnly ? false, }: let @@ -49,10 +50,14 @@ let ) allVersions; versions = lib.naturalSort (lib.attrNames supportedVersions); latestVersion = lib.last versions; - numberOfVersions = builtins.length versions; - packages = builtins.attrValues ( - lib.mapAttrs (name: value: build name value.hash) supportedVersions - ); + versionsToUse = + if latestOnly then + { "${latestVersion}" = supportedVersions.${latestVersion}; } + else + supportedVersions; + versionsBuilt = if latestOnly then [ latestVersion ] else versions; + numberOfVersionsBuilt = builtins.length versionsBuilt; + packages = builtins.attrValues (lib.mapAttrs (name: value: build name value.hash) versionsToUse); in pkgs.buildEnv { name = pname; @@ -68,18 +73,23 @@ pkgs.buildEnv { # checks (set -x test "$(ls -A $out/lib/${pname}*${postgresql.dlSuffix} | wc -l)" = "${ - toString (numberOfVersions + 1) + toString (numberOfVersionsBuilt + 1) }" ) ''; passthru = { - inherit versions numberOfVersions pname; + versions = versionsBuilt; + numberOfVersions = numberOfVersionsBuilt; + inherit pname latestOnly; defaultSettings = { shared_preload_libraries = [ "safeupdate" ]; }; pgRegressTestName = "pg-safeupdate"; version = - "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." ] [ "-" ] v) versions); + if latestOnly then + latestVersion + else + "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." ] [ "-" ] v) versions); }; } diff --git a/nix/ext/pg_cron/default.nix b/nix/ext/pg_cron/default.nix index cec9d8ec4..a823fa450 100644 --- a/nix/ext/pg_cron/default.nix +++ b/nix/ext/pg_cron/default.nix @@ -6,6 +6,7 @@ buildEnv, makeWrapper, switch-ext-version, + latestOnly ? 
false, }: let pname = "pg_cron"; @@ -15,7 +16,13 @@ let ) allVersions; versions = lib.naturalSort (lib.attrNames supportedVersions); latestVersion = lib.last versions; - numberOfVersions = builtins.length versions; + versionsToUse = + if latestOnly then + { "${latestVersion}" = supportedVersions.${latestVersion}; } + else + supportedVersions; + versionsBuilt = if latestOnly then [ latestVersion ] else versions; + numberOfVersionsBuilt = builtins.length versionsBuilt; build = version: versionData: stdenv.mkDerivation rec { @@ -71,7 +78,7 @@ let license = licenses.postgresql; }; }; - packages = builtins.attrValues (lib.mapAttrs (name: value: build name value) supportedVersions); + packages = builtins.attrValues (lib.mapAttrs (name: value: build name value) versionsToUse); in buildEnv { name = pname; @@ -93,7 +100,7 @@ buildEnv { # checks (set -x test "$(ls -A $out/lib/${pname}*${postgresql.dlSuffix} | wc -l)" = "${ - toString (numberOfVersions + 1) + toString (numberOfVersionsBuilt + 1) }" ) @@ -109,13 +116,18 @@ buildEnv { }; passthru = { - inherit versions numberOfVersions switch-ext-version; + versions = versionsBuilt; + numberOfVersions = numberOfVersionsBuilt; + inherit switch-ext-version latestOnly; hasBackgroundWorker = true; defaultSettings = { shared_preload_libraries = [ "pg_cron" ]; "cron.database_name" = "postgres"; }; version = - "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." ] [ "-" ] v) versions); + if latestOnly then + latestVersion + else + "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." ] [ "-" ] v) versions); }; } diff --git a/nix/ext/pg_graphql/default.nix b/nix/ext/pg_graphql/default.nix index a7f6d1065..11ea156dc 100644 --- a/nix/ext/pg_graphql/default.nix +++ b/nix/ext/pg_graphql/default.nix @@ -7,6 +7,7 @@ postgresql, rust-bin, rsync, + latestOnly ? false, }: let @@ -124,9 +125,15 @@ let ) allVersions; versions = lib.naturalSort (lib.attrNames supportedVersions); latestVersion = lib.last versions; - numberOfVersions = builtins.length versions; + versionsToUse = + if latestOnly then + { "${latestVersion}" = supportedVersions.${latestVersion}; } + else + supportedVersions; + versionsBuilt = if latestOnly then [ latestVersion ] else versions; + numberOfVersionsBuilt = builtins.length versionsBuilt; packages = builtins.attrValues ( - lib.mapAttrs (name: value: build name value.hash value.rust value.pgrx) supportedVersions + lib.mapAttrs (name: value: build name value.hash value.rust value.pgrx) versionsToUse ); in (buildEnv { @@ -167,14 +174,19 @@ in # checks (set -x test "$(ls -A $out/lib/${pname}*${postgresql.dlSuffix} | wc -l)" = "${ - toString (numberOfVersions + 1) + toString (numberOfVersionsBuilt + 1) }" ) ''; passthru = { - inherit versions numberOfVersions pname; + versions = versionsBuilt; + numberOfVersions = numberOfVersionsBuilt; + inherit pname latestOnly; version = - "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." ] [ "-" ] v) versions); + if latestOnly then + latestVersion + else + "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." ] [ "-" ] v) versions); }; }).overrideAttrs (_: { diff --git a/nix/ext/pg_hashids.nix b/nix/ext/pg_hashids.nix index b11c5ce68..c8fc3fef5 100644 --- a/nix/ext/pg_hashids.nix +++ b/nix/ext/pg_hashids.nix @@ -4,6 +4,7 @@ fetchFromGitHub, postgresql, buildEnv, + latestOnly ? 
false, }: let pname = "pg_hashids"; @@ -71,9 +72,15 @@ let ) allVersions; versions = lib.naturalSort (lib.attrNames supportedVersions); latestVersion = lib.last versions; - numberOfVersions = builtins.length versions; + versionsToUse = + if latestOnly then + { "${latestVersion}" = supportedVersions.${latestVersion}; } + else + supportedVersions; + versionsBuilt = if latestOnly then [ latestVersion ] else versions; + numberOfVersionsBuilt = builtins.length versionsBuilt; packages = builtins.attrValues ( - lib.mapAttrs (name: value: build name value.hash (value.revision or name)) supportedVersions + lib.mapAttrs (name: value: build name value.hash (value.revision or name)) versionsToUse ); in buildEnv { @@ -87,14 +94,19 @@ buildEnv { # checks (set -x test "$(ls -A $out/lib/${pname}*${postgresql.dlSuffix} | wc -l)" = "${ - toString (numberOfVersions + 1) + toString (numberOfVersionsBuilt + 1) }" ) ''; passthru = { - inherit versions numberOfVersions pname; + versions = versionsBuilt; + numberOfVersions = numberOfVersionsBuilt; + inherit pname latestOnly; version = - "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." ] [ "-" ] v) versions); + if latestOnly then + latestVersion + else + "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." ] [ "-" ] v) versions); }; } diff --git a/nix/ext/pg_jsonschema/default.nix b/nix/ext/pg_jsonschema/default.nix index d3a72036f..4ec4f97a9 100644 --- a/nix/ext/pg_jsonschema/default.nix +++ b/nix/ext/pg_jsonschema/default.nix @@ -6,6 +6,7 @@ fetchFromGitHub, postgresql, rust-bin, + latestOnly ? false, }: let pname = "pg_jsonschema"; @@ -131,10 +132,16 @@ let ) allVersions; versions = lib.naturalSort (lib.attrNames supportedVersions); latestVersion = lib.last versions; - numberOfVersions = builtins.length versions; + versionsToUse = + if latestOnly then + { "${latestVersion}" = supportedVersions.${latestVersion}; } + else + supportedVersions; packages = builtins.attrValues ( - lib.mapAttrs (name: value: build name value.hash value.rust value.pgrx) supportedVersions + lib.mapAttrs (name: value: build name value.hash value.rust value.pgrx) versionsToUse ); + versionsBuilt = if latestOnly then [ latestVersion ] else versions; + numberOfVersionsBuilt = builtins.length versionsBuilt; in (pkgs.buildEnv { name = pname; @@ -147,7 +154,7 @@ in # checks (set -x test "$(ls -A $out/lib/${pname}*${postgresql.dlSuffix} | wc -l)" = "${ - toString (numberOfVersions + 1) + toString (numberOfVersionsBuilt + 1) }" ) @@ -171,9 +178,14 @@ in ''; passthru = { - inherit versions numberOfVersions pname; + versions = versionsBuilt; + numberOfVersions = numberOfVersionsBuilt; + inherit pname latestOnly; version = - "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." ] [ "-" ] v) versions); + if latestOnly then + latestVersion + else + "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." ] [ "-" ] v) versions); }; }).overrideAttrs (_: { diff --git a/nix/ext/pg_net.nix b/nix/ext/pg_net.nix index 60304138d..09692b6ac 100644 --- a/nix/ext/pg_net.nix +++ b/nix/ext/pg_net.nix @@ -8,6 +8,7 @@ makeWrapper, switch-ext-version, curl_8_6, + latestOnly ? 
false, }: let @@ -98,15 +99,23 @@ let ) platformFilteredVersions; versions = lib.naturalSort (lib.attrNames supportedVersions); latestVersion = lib.last versions; - numberOfVersions = builtins.length versions; - packages = builtins.attrValues ( - lib.mapAttrs (name: value: build name value.hash) supportedVersions - ); + versionsToUse = + if latestOnly then + { "${latestVersion}" = supportedVersions.${latestVersion}; } + else + supportedVersions; + packages = builtins.attrValues (lib.mapAttrs (name: value: build name value.hash) versionsToUse); + versionsBuilt = if latestOnly then [ latestVersion ] else versions; + numberOfVersionsBuilt = builtins.length versionsBuilt; in pkgs.buildEnv { name = pname; paths = packages; nativeBuildInputs = [ makeWrapper ]; + pathsToLink = [ + "/lib" + "/share/postgresql/extension" + ]; postBuild = '' { echo "default_version = '${latestVersion}'" @@ -118,7 +127,7 @@ pkgs.buildEnv { # checks (set -x test "$(ls -A $out/lib/${pname}*${postgresql.dlSuffix} | wc -l)" = "${ - toString (numberOfVersions + 1) + toString (numberOfVersionsBuilt + 1) }" ) @@ -127,12 +136,17 @@ pkgs.buildEnv { ''; passthru = { - inherit versions numberOfVersions pname; + versions = versionsBuilt; + numberOfVersions = numberOfVersionsBuilt; + inherit pname latestOnly; hasBackgroundWorker = true; defaultSettings = { shared_preload_libraries = [ "pg_net" ]; }; version = - "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." ] [ "-" ] v) versions); + if latestOnly then + latestVersion + else + "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." ] [ "-" ] v) versions); }; } diff --git a/nix/ext/pg_partman.nix b/nix/ext/pg_partman.nix index 809861d52..4c36e765f 100644 --- a/nix/ext/pg_partman.nix +++ b/nix/ext/pg_partman.nix @@ -6,6 +6,7 @@ postgresql, makeWrapper, switch-ext-version, + latestOnly ? false, }: let @@ -60,10 +61,14 @@ let ) allVersions; versions = lib.naturalSort (lib.attrNames supportedVersions); latestVersion = lib.last versions; - numberOfVersions = builtins.length versions; - packages = builtins.attrValues ( - lib.mapAttrs (name: value: build name value.hash) supportedVersions - ); + versionsToUse = + if latestOnly then + { "${latestVersion}" = supportedVersions.${latestVersion}; } + else + supportedVersions; + versionsBuilt = if latestOnly then [ latestVersion ] else versions; + numberOfVersionsBuilt = builtins.length versionsBuilt; + packages = builtins.attrValues (lib.mapAttrs (name: value: build name value.hash) versionsToUse); in pkgs.buildEnv { name = pname; @@ -86,7 +91,7 @@ pkgs.buildEnv { # checks (set -x test "$(ls -A $out/lib/${pname}*${postgresql.dlSuffix} | wc -l)" = "${ - toString (numberOfVersions + 1) + toString (numberOfVersionsBuilt + 1) }" ) @@ -95,12 +100,9 @@ pkgs.buildEnv { ''; passthru = { - inherit - versions - numberOfVersions - switch-ext-version - libName - ; + versions = versionsBuilt; + numberOfVersions = numberOfVersionsBuilt; + inherit switch-ext-version libName latestOnly; pname = "${pname}-all"; hasBackgroundWorker = true; defaultSchema = "partman"; @@ -108,6 +110,9 @@ pkgs.buildEnv { shared_preload_libraries = [ libName ]; }; version = - "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." ] [ "-" ] v) versions); + if latestOnly then + latestVersion + else + "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." 
] [ "-" ] v) versions); }; } diff --git a/nix/ext/pg_plan_filter.nix b/nix/ext/pg_plan_filter.nix index dfb3262b7..402f6b192 100644 --- a/nix/ext/pg_plan_filter.nix +++ b/nix/ext/pg_plan_filter.nix @@ -5,6 +5,7 @@ fetchFromGitHub, postgresql, makeWrapper, + latestOnly ? false, }: let @@ -50,9 +51,15 @@ let ) allVersions; versions = lib.naturalSort (lib.attrNames supportedVersions); latestVersion = lib.last versions; - numberOfVersions = builtins.length versions; + versionsToUse = + if latestOnly then + { "${latestVersion}" = supportedVersions.${latestVersion}; } + else + supportedVersions; + versionsBuilt = if latestOnly then [ latestVersion ] else versions; + numberOfVersionsBuilt = builtins.length versionsBuilt; packages = builtins.attrValues ( - lib.mapAttrs (name: value: build name value.rev value.hash) supportedVersions + lib.mapAttrs (name: value: build name value.rev value.hash) versionsToUse ); in pkgs.buildEnv { @@ -69,18 +76,23 @@ pkgs.buildEnv { # checks (set -x test "$(ls -A $out/lib/${pname}*${postgresql.dlSuffix} | wc -l)" = "${ - toString (numberOfVersions + 1) + toString (numberOfVersionsBuilt + 1) }" ) ''; passthru = { - inherit versions numberOfVersions pname; + versions = versionsBuilt; + numberOfVersions = numberOfVersionsBuilt; + inherit pname latestOnly; defaultSettings = { shared_preload_libraries = [ "plan_filter" ]; }; pgRegressTestName = "pg_plan_filter"; version = - "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." ] [ "-" ] v) versions); + if latestOnly then + latestVersion + else + "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." ] [ "-" ] v) versions); }; } diff --git a/nix/ext/pg_repack.nix b/nix/ext/pg_repack.nix index 153cebd76..89b1d1118 100644 --- a/nix/ext/pg_repack.nix +++ b/nix/ext/pg_repack.nix @@ -6,6 +6,7 @@ postgresqlTestHook, testers, buildEnv, + latestOnly ? false, }: let pname = "pg_repack"; @@ -21,10 +22,14 @@ let # Derived version information versions = lib.naturalSort (lib.attrNames supportedVersions); latestVersion = lib.last versions; - numberOfVersions = builtins.length versions; - packages = builtins.attrValues ( - lib.mapAttrs (name: value: build name value.hash) supportedVersions - ); + versionsToUse = + if latestOnly then + { "${latestVersion}" = supportedVersions.${latestVersion}; } + else + supportedVersions; + versionsBuilt = if latestOnly then [ latestVersion ] else versions; + numberOfVersionsBuilt = builtins.length versionsBuilt; + packages = builtins.attrValues (lib.mapAttrs (name: value: build name value.hash) versionsToUse); # Build function for individual versions build = @@ -117,7 +122,7 @@ buildEnv { postBuild = '' # Verify all expected library files are present - expectedFiles=${toString (numberOfVersions + 1)} + expectedFiles=${toString (numberOfVersionsBuilt + 1)} actualFiles=$(ls -l $out/lib/${pname}*${postgresql.dlSuffix} | wc -l) if [[ "$actualFiles" != "$expectedFiles" ]]; then @@ -129,8 +134,13 @@ buildEnv { ''; passthru = { - inherit versions numberOfVersions pname; + versions = versionsBuilt; + numberOfVersions = numberOfVersionsBuilt; + inherit pname latestOnly; version = - "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." ] [ "-" ] v) versions); + if latestOnly then + latestVersion + else + "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." 
] [ "-" ] v) versions); }; } diff --git a/nix/ext/pg_stat_monitor.nix b/nix/ext/pg_stat_monitor.nix index ddf46de30..c82eaf830 100644 --- a/nix/ext/pg_stat_monitor.nix +++ b/nix/ext/pg_stat_monitor.nix @@ -4,6 +4,7 @@ fetchFromGitHub, postgresql, buildEnv, + latestOnly ? false, }: let pname = "pg_stat_monitor"; @@ -19,9 +20,15 @@ let # Derived version information versions = lib.naturalSort (lib.attrNames supportedVersions); latestVersion = lib.last versions; - numberOfVersions = builtins.length versions; + versionsToUse = + if latestOnly then + { "${latestVersion}" = supportedVersions.${latestVersion}; } + else + supportedVersions; + versionsBuilt = if latestOnly then [ latestVersion ] else versions; + numberOfVersionsBuilt = builtins.length versionsBuilt; packages = builtins.attrValues ( - lib.mapAttrs (name: value: build name value.hash value.revision) supportedVersions + lib.mapAttrs (name: value: build name value.hash value.revision) versionsToUse ); # Build function for individual versions @@ -85,7 +92,7 @@ buildEnv { postBuild = '' # Verify all expected library files are present - expectedFiles=${toString (numberOfVersions + 1)} + expectedFiles=${toString (numberOfVersionsBuilt + 1)} actualFiles=$(ls -l $out/lib/${pname}*${postgresql.dlSuffix} | wc -l) if [[ "$actualFiles" != "$expectedFiles" ]]; then @@ -97,8 +104,13 @@ buildEnv { ''; passthru = { - inherit versions numberOfVersions pname; + versions = versionsBuilt; + numberOfVersions = numberOfVersionsBuilt; + inherit pname latestOnly; version = - "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." ] [ "-" ] v) versions); + if latestOnly then + latestVersion + else + "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." ] [ "-" ] v) versions); }; } diff --git a/nix/ext/pg_tle.nix b/nix/ext/pg_tle.nix index 7101952a2..d5d1e4446 100644 --- a/nix/ext/pg_tle.nix +++ b/nix/ext/pg_tle.nix @@ -7,6 +7,7 @@ flex, openssl, libkrb5, + latestOnly ? false, }: let pname = "pg_tle"; @@ -78,10 +79,14 @@ let ) allVersions; versions = lib.naturalSort (lib.attrNames supportedVersions); latestVersion = lib.last versions; - numberOfVersions = builtins.length versions; - packages = builtins.attrValues ( - lib.mapAttrs (name: value: build name value.hash) supportedVersions - ); + versionsToUse = + if latestOnly then + { "${latestVersion}" = supportedVersions.${latestVersion}; } + else + supportedVersions; + versionsBuilt = if latestOnly then [ latestVersion ] else versions; + numberOfVersionsBuilt = builtins.length versionsBuilt; + packages = builtins.attrValues (lib.mapAttrs (name: value: build name value.hash) versionsToUse); in buildEnv { name = pname; @@ -94,17 +99,22 @@ buildEnv { # checks (set -x test "$(ls -A $out/lib/${pname}*${postgresql.dlSuffix} | wc -l)" = "${ - toString (numberOfVersions + 1) + toString (numberOfVersionsBuilt + 1) }" ) ''; passthru = { - inherit versions numberOfVersions pname; + versions = versionsBuilt; + numberOfVersions = numberOfVersionsBuilt; + inherit pname latestOnly; defaultSettings = { shared_preload_libraries = [ "pg_tle" ]; }; version = - "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." ] [ "-" ] v) versions); + if latestOnly then + latestVersion + else + "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." ] [ "-" ] v) versions); }; } diff --git a/nix/ext/pgaudit.nix b/nix/ext/pgaudit.nix index 55dd237f7..06e4bca40 100644 --- a/nix/ext/pgaudit.nix +++ b/nix/ext/pgaudit.nix @@ -6,6 +6,7 @@ libkrb5, openssl, postgresql, + latestOnly ? 
false, }: #adapted from https://github.com/NixOS/nixpkgs/blob/master/pkgs/servers/sql/postgresql/ext/pgaudit.nix let @@ -24,12 +25,16 @@ let # Supported versions sorted (for libraries) versions = lib.naturalSort (lib.attrNames supportedVersions); latestVersion = lib.last versions; - numberOfVersions = builtins.length versions; + versionsToUse = + if latestOnly then + { "${latestVersion}" = supportedVersions.${latestVersion}; } + else + supportedVersions; + versionsBuilt = if latestOnly then [ latestVersion ] else versions; + numberOfVersionsBuilt = builtins.length versionsBuilt; # Build packages only for supported versions (with libraries) - packages = builtins.attrValues ( - lib.mapAttrs (name: value: build name value.hash) supportedVersions - ); + packages = builtins.attrValues (lib.mapAttrs (name: value: build name value.hash) versionsToUse); # Helper function to generate migration SQL file pairs # Returns a list of {from, to} pairs for sequential migrations @@ -217,7 +222,7 @@ buildEnv { '') versions} # Verify all expected library files are present (one per version + symlink) - expectedFiles=${toString (numberOfVersions + 1)} + expectedFiles=${toString (numberOfVersionsBuilt + 1)} actualFiles=$(ls -A $out/lib/${pname}*${postgresql.dlSuffix} | wc -l) if [[ "$actualFiles" != "$expectedFiles" ]]; then @@ -229,9 +234,14 @@ buildEnv { ''; passthru = { - inherit versions numberOfVersions pname; + versions = versionsBuilt; + numberOfVersions = numberOfVersionsBuilt; + inherit pname latestOnly; version = - "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." ] [ "-" ] v) versions); + if latestOnly then + latestVersion + else + "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." ] [ "-" ] v) versions); defaultSettings = { shared_preload_libraries = "pgaudit"; }; diff --git a/nix/ext/pgjwt.nix b/nix/ext/pgjwt.nix index 6bac9dcd8..348b534c3 100644 --- a/nix/ext/pgjwt.nix +++ b/nix/ext/pgjwt.nix @@ -5,6 +5,7 @@ fetchFromGitHub, postgresql, unstableGitUpdater, + latestOnly ? false, }: let pname = "pgjwt"; @@ -14,9 +15,13 @@ let ) allVersions; versions = lib.naturalSort (lib.attrNames supportedVersions); latestVersion = lib.last versions; - numberOfVersions = builtins.trace "Versions: ${toString (builtins.length versions)}" ( - builtins.length versions - ); + versionsToUse = + if latestOnly then + { "${latestVersion}" = supportedVersions.${latestVersion}; } + else + supportedVersions; + versionsBuilt = if latestOnly then [ latestVersion ] else versions; + numberOfVersionsBuilt = builtins.length versionsBuilt; build = version: hash: revision: stdenv.mkDerivation { @@ -68,7 +73,7 @@ let }; }; packages = builtins.attrValues ( - lib.mapAttrs (name: value: build name value.hash value.revision) supportedVersions + lib.mapAttrs (name: value: build name value.hash value.revision) versionsToUse ); in buildEnv { @@ -77,8 +82,13 @@ buildEnv { pathsToLink = [ "/share/postgresql/extension" ]; passthru = { - inherit versions numberOfVersions pname; + versions = versionsBuilt; + numberOfVersions = numberOfVersionsBuilt; + inherit pname latestOnly; version = - "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." ] [ "-" ] v) versions); + if latestOnly then + latestVersion + else + "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." 
] [ "-" ] v) versions); }; } diff --git a/nix/ext/pgmq/default.nix b/nix/ext/pgmq/default.nix index 518308211..42613b6ac 100644 --- a/nix/ext/pgmq/default.nix +++ b/nix/ext/pgmq/default.nix @@ -4,6 +4,7 @@ fetchFromGitHub, postgresql, buildEnv, + latestOnly ? false, }: let pname = "pgmq"; @@ -19,10 +20,14 @@ let # Derived version information versions = lib.naturalSort (lib.attrNames supportedVersions); latestVersion = lib.last versions; - numberOfVersions = builtins.length versions; - packages = builtins.attrValues ( - lib.mapAttrs (name: value: build name value.hash) supportedVersions - ); + versionsToUse = + if latestOnly then + { "${latestVersion}" = supportedVersions.${latestVersion}; } + else + supportedVersions; + versionsBuilt = if latestOnly then [ latestVersion ] else versions; + numberOfVersionsBuilt = builtins.length versionsBuilt; + packages = builtins.attrValues (lib.mapAttrs (name: value: build name value.hash) versionsToUse); # Build function for individual versions build = @@ -99,11 +104,16 @@ buildEnv { pathsToLink = [ "/share/postgresql/extension" ]; passthru = { - inherit versions numberOfVersions pname; + versions = versionsBuilt; + numberOfVersions = numberOfVersionsBuilt; + inherit pname latestOnly; defaultSettings = { search_path = "\"$user\", public, auth, extensions"; }; version = - "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." ] [ "-" ] v) versions); + if latestOnly then + latestVersion + else + "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." ] [ "-" ] v) versions); }; } diff --git a/nix/ext/pgroonga/default.nix b/nix/ext/pgroonga/default.nix index 89aca72a2..4a824ac25 100644 --- a/nix/ext/pgroonga/default.nix +++ b/nix/ext/pgroonga/default.nix @@ -11,6 +11,7 @@ buildEnv, supabase-groonga, mecab-naist-jdic, + latestOnly ? false, }: let pname = "pgroonga"; @@ -26,10 +27,14 @@ let # Derived version information versions = lib.naturalSort (lib.attrNames supportedVersions); latestVersion = lib.last versions; - numberOfVersions = builtins.length versions; - packages = builtins.attrValues ( - lib.mapAttrs (name: value: build name value.hash) supportedVersions - ); + versionsToUse = + if latestOnly then + { "${latestVersion}" = supportedVersions.${latestVersion}; } + else + supportedVersions; + versionsBuilt = if latestOnly then [ latestVersion ] else versions; + numberOfVersionsBuilt = builtins.length versionsBuilt; + packages = builtins.attrValues (lib.mapAttrs (name: value: build name value.hash) versionsToUse); # List of C extensions to be included in the build cExtensions = [ @@ -159,7 +164,7 @@ buildEnv { ]; postBuild = '' # Verify all expected library files are present - expectedFiles=${toString ((numberOfVersions + 1) * (builtins.length cExtensions))} + expectedFiles=${toString ((numberOfVersionsBuilt + 1) * (builtins.length cExtensions))} actualFiles=$(ls -l $out/lib/${pname}*${postgresql.dlSuffix} | wc -l) if [[ "$actualFiles" != "$expectedFiles" ]]; then @@ -171,8 +176,13 @@ buildEnv { ''; passthru = { - inherit versions numberOfVersions pname; + versions = versionsBuilt; + numberOfVersions = numberOfVersionsBuilt; + inherit pname latestOnly; version = - "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." ] [ "-" ] v) versions); + if latestOnly then + latestVersion + else + "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." 
] [ "-" ] v) versions); }; } diff --git a/nix/ext/pgrouting/default.nix b/nix/ext/pgrouting/default.nix index a05101dd8..cff4e7d86 100644 --- a/nix/ext/pgrouting/default.nix +++ b/nix/ext/pgrouting/default.nix @@ -7,6 +7,7 @@ cmake, boost, buildEnv, + latestOnly ? false, }: let pname = "pgrouting"; @@ -22,10 +23,14 @@ let # Derived version information versions = lib.naturalSort (lib.attrNames supportedVersions); latestVersion = lib.last versions; - numberOfVersions = builtins.length versions; - packages = builtins.attrValues ( - lib.mapAttrs (name: value: build name value.hash) supportedVersions - ); + versionsToUse = + if latestOnly then + { "${latestVersion}" = supportedVersions.${latestVersion}; } + else + supportedVersions; + versionsBuilt = if latestOnly then [ latestVersion ] else versions; + numberOfVersionsBuilt = builtins.length versionsBuilt; + packages = builtins.attrValues (lib.mapAttrs (name: value: build name value.hash) versionsToUse); # Build function for individual versions build = @@ -130,7 +135,7 @@ buildEnv { postBuild = '' #Verify all expected library files are present - expectedFiles=${toString (numberOfVersions + 1)} + expectedFiles=${toString (numberOfVersionsBuilt + 1)} actualFiles=$(ls -l $out/lib/lib${pname}*${postgresql.dlSuffix} | wc -l) if [[ "$actualFiles" != "$expectedFiles" ]]; then @@ -142,8 +147,13 @@ buildEnv { ''; passthru = { - inherit versions numberOfVersions pname; + versions = versionsBuilt; + numberOfVersions = numberOfVersionsBuilt; + inherit pname latestOnly; version = - "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." ] [ "-" ] v) versions); + if latestOnly then + latestVersion + else + "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." ] [ "-" ] v) versions); }; } diff --git a/nix/ext/pgsodium.nix b/nix/ext/pgsodium.nix index fa111d8a5..ad5cd008b 100644 --- a/nix/ext/pgsodium.nix +++ b/nix/ext/pgsodium.nix @@ -5,6 +5,7 @@ fetchFromGitHub, postgresql, libsodium, + latestOnly ? false, }: let pname = "pgsodium"; @@ -20,10 +21,14 @@ let # Derived version information versions = lib.naturalSort (lib.attrNames supportedVersions); latestVersion = lib.last versions; - numberOfVersions = builtins.length versions; - packages = builtins.attrValues ( - lib.mapAttrs (name: value: build name value.hash) supportedVersions - ); + versionsToUse = + if latestOnly then + { "${latestVersion}" = supportedVersions.${latestVersion}; } + else + supportedVersions; + packages = builtins.attrValues (lib.mapAttrs (name: value: build name value.hash) versionsToUse); + versionsBuilt = if latestOnly then [ latestVersion ] else versions; + numberOfVersionsBuilt = builtins.length versionsBuilt; # Build function for individual pgsodium versions build = @@ -89,7 +94,7 @@ pkgs.buildEnv { postBuild = '' # Verify all expected library files are present - expectedFiles=${toString (numberOfVersions + 1)} + expectedFiles=${toString (numberOfVersionsBuilt + 1)} actualFiles=$(ls -A $out/lib/${pname}*${postgresql.dlSuffix} | wc -l) if [[ "$actualFiles" != "$expectedFiles" ]]; then @@ -101,8 +106,13 @@ pkgs.buildEnv { ''; passthru = { - inherit versions numberOfVersions pname; + versions = versionsBuilt; + numberOfVersions = numberOfVersionsBuilt; + inherit pname latestOnly; version = - "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." ] [ "-" ] v) versions); + if latestOnly then + latestVersion + else + "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." 
] [ "-" ] v) versions); }; } diff --git a/nix/ext/pgsql-http.nix b/nix/ext/pgsql-http.nix index 3ad03b80c..e36f4a218 100644 --- a/nix/ext/pgsql-http.nix +++ b/nix/ext/pgsql-http.nix @@ -5,6 +5,7 @@ fetchFromGitHub, postgresql, curl, + latestOnly ? false, }: let pname = "http"; @@ -20,10 +21,14 @@ let # Derived version information versions = lib.naturalSort (lib.attrNames supportedVersions); latestVersion = lib.last versions; - numberOfVersions = builtins.length versions; - packages = builtins.attrValues ( - lib.mapAttrs (name: value: build name value.hash) supportedVersions - ); + versionsToUse = + if latestOnly then + { "${latestVersion}" = supportedVersions.${latestVersion}; } + else + supportedVersions; + versionsBuilt = if latestOnly then [ latestVersion ] else versions; + numberOfVersionsBuilt = builtins.length versionsBuilt; + packages = builtins.attrValues (lib.mapAttrs (name: value: build name value.hash) versionsToUse); # Build function for individual versions build = @@ -91,7 +96,7 @@ pkgs.buildEnv { ]; postBuild = '' # Verify all expected library files are present - expectedFiles=${toString (numberOfVersions + 1)} + expectedFiles=${toString (numberOfVersionsBuilt + 1)} actualFiles=$(ls -A $out/lib/${pname}*${postgresql.dlSuffix} | wc -l) if [[ "$actualFiles" != "$expectedFiles" ]]; then @@ -103,8 +108,13 @@ pkgs.buildEnv { ''; passthru = { - inherit versions numberOfVersions pname; + versions = versionsBuilt; + numberOfVersions = numberOfVersionsBuilt; + inherit pname latestOnly; version = - "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." ] [ "-" ] v) versions); + if latestOnly then + latestVersion + else + "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." ] [ "-" ] v) versions); }; } diff --git a/nix/ext/pgvector.nix b/nix/ext/pgvector.nix index bcf86ebfb..12b8816d6 100644 --- a/nix/ext/pgvector.nix +++ b/nix/ext/pgvector.nix @@ -4,6 +4,7 @@ stdenv, fetchFromGitHub, postgresql, + latestOnly ? false, }: let pname = "vector"; @@ -19,10 +20,14 @@ let # Derived version information versions = lib.naturalSort (lib.attrNames supportedVersions); latestVersion = lib.last versions; - numberOfVersions = builtins.length versions; - packages = builtins.attrValues ( - lib.mapAttrs (name: value: build name value.hash) supportedVersions - ); + versionsToUse = + if latestOnly then + { "${latestVersion}" = supportedVersions.${latestVersion}; } + else + supportedVersions; + packages = builtins.attrValues (lib.mapAttrs (name: value: build name value.hash) versionsToUse); + versionsBuilt = if latestOnly then [ latestVersion ] else versions; + numberOfVersionsBuilt = builtins.length versionsBuilt; # Build function for individual versions build = @@ -83,9 +88,14 @@ pkgs.buildEnv { ]; passthru = { - inherit versions numberOfVersions pname; + versions = versionsBuilt; + numberOfVersions = numberOfVersionsBuilt; + inherit pname latestOnly; version = - "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." ] [ "-" ] v) versions); + if latestOnly then + latestVersion + else + "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." ] [ "-" ] v) versions); pgRegressTestName = "pgvector"; }; } diff --git a/nix/ext/plpgsql-check.nix b/nix/ext/plpgsql-check.nix index faf918c67..05542b949 100644 --- a/nix/ext/plpgsql-check.nix +++ b/nix/ext/plpgsql-check.nix @@ -7,6 +7,7 @@ buildEnv, makeWrapper, switch-ext-version, + latestOnly ? 
false, }: let pname = "plpgsql_check"; @@ -22,9 +23,15 @@ let # Derived version information versions = lib.naturalSort (lib.attrNames supportedVersions); latestVersion = lib.last versions; - numberOfVersions = builtins.length versions; + versionsToUse = + if latestOnly then + { "${latestVersion}" = supportedVersions.${latestVersion}; } + else + supportedVersions; + versionsBuilt = if latestOnly then [ latestVersion ] else versions; + numberOfVersionsBuilt = builtins.length versionsBuilt; packages = builtins.attrValues ( - lib.mapAttrs (name: value: build name value.hash value.revision) supportedVersions + lib.mapAttrs (name: value: build name value.hash value.revision) versionsToUse ); # Build function for individual versions @@ -108,7 +115,7 @@ buildEnv { ln -sfn ${pname}-${latestVersion}${postgresql.dlSuffix} $out/lib/${pname}${postgresql.dlSuffix} # Verify all expected library files are present - expectedFiles=${toString (numberOfVersions + 1)} + expectedFiles=${toString (numberOfVersionsBuilt + 1)} actualFiles=$(ls -l $out/lib/${pname}*${postgresql.dlSuffix} | wc -l) if [[ "$actualFiles" != "$expectedFiles" ]]; then @@ -133,7 +140,9 @@ buildEnv { ''; passthru = { - inherit versions numberOfVersions switch-ext-version; + versions = versionsBuilt; + numberOfVersions = numberOfVersionsBuilt; + inherit switch-ext-version latestOnly; hasBackgroundWorker = true; defaultSettings = { shared_preload_libraries = [ @@ -142,6 +151,9 @@ buildEnv { ]; }; version = - "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." ] [ "-" ] v) versions); + if latestOnly then + latestVersion + else + "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." ] [ "-" ] v) versions); }; } diff --git a/nix/ext/plv8/default.nix b/nix/ext/plv8/default.nix index 50927521b..93a17434a 100644 --- a/nix/ext/plv8/default.nix +++ b/nix/ext/plv8/default.nix @@ -14,6 +14,7 @@ nodejs_20, libcxx, v8_oldstable, + latestOnly ? false, }: let @@ -30,10 +31,14 @@ let # Derived version information versions = lib.naturalSort (lib.attrNames supportedVersions); latestVersion = lib.last versions; - numberOfVersions = builtins.length versions; - packages = builtins.attrValues ( - lib.mapAttrs (name: value: build name value.hash) supportedVersions - ); + versionsToUse = + if latestOnly then + { "${latestVersion}" = supportedVersions.${latestVersion}; } + else + supportedVersions; + versionsBuilt = if latestOnly then [ latestVersion ] else versions; + numberOfVersionsBuilt = builtins.length versionsBuilt; + packages = builtins.attrValues (lib.mapAttrs (name: value: build name value.hash) versionsToUse); # plv8 3.1 requires an older version of v8 (we cannot use nodejs.libv8) v8 = v8_oldstable; @@ -221,7 +226,7 @@ buildEnv { ]; postBuild = '' # Verify all expected library files are present - expectedFiles=${toString (numberOfVersions + 1)} + expectedFiles=${toString (numberOfVersionsBuilt + 1)} actualFiles=$(ls -A $out/lib/${pname}*${postgresql.dlSuffix} | wc -l) if [[ "$actualFiles" != "$expectedFiles" ]]; then @@ -233,8 +238,13 @@ buildEnv { ''; passthru = { - inherit versions numberOfVersions pname; + versions = versionsBuilt; + numberOfVersions = numberOfVersionsBuilt; + inherit pname latestOnly; version = - "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." ] [ "-" ] v) versions); + if latestOnly then + latestVersion + else + "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." 
] [ "-" ] v) versions); }; } diff --git a/nix/ext/postgis.nix b/nix/ext/postgis.nix index 3989e8aae..74539e5de 100644 --- a/nix/ext/postgis.nix +++ b/nix/ext/postgis.nix @@ -17,6 +17,7 @@ callPackage, buildEnv, sfcgal, + latestOnly ? false, }: let @@ -34,10 +35,14 @@ let # Derived version information versions = lib.naturalSort (lib.attrNames supportedVersions); latestVersion = lib.last versions; - numberOfVersions = builtins.length versions; - packages = builtins.attrValues ( - lib.mapAttrs (name: value: build name value.hash) supportedVersions - ); + versionsToUse = + if latestOnly then + { "${latestVersion}" = supportedVersions.${latestVersion}; } + else + supportedVersions; + versionsBuilt = if latestOnly then [ latestVersion ] else versions; + numberOfVersionsBuilt = builtins.length versionsBuilt; + packages = builtins.attrValues (lib.mapAttrs (name: value: build name value.hash) versionsToUse); # List of C extensions to be included in the build cExtensions = [ @@ -190,9 +195,9 @@ in ]; postBuild = '' # Verify all expected library files are present - # We expect: (numberOfVersions * cExtensions) versioned libraries + cExtensions symlinks + # We expect: (numberOfVersionsBuilt * cExtensions) versioned libraries + cExtensions symlinks expectedFiles=${ - toString ((numberOfVersions * builtins.length cExtensions) + builtins.length cExtensions) + toString ((numberOfVersionsBuilt * builtins.length cExtensions) + builtins.length cExtensions) } actualFiles=$(ls -A $out/lib/*${postgresql.dlSuffix} | wc -l) @@ -205,9 +210,14 @@ in ''; passthru = { - inherit versions numberOfVersions pname; + versions = versionsBuilt; + numberOfVersions = numberOfVersionsBuilt; + inherit pname latestOnly; version = - "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." ] [ "-" ] v) versions); + if latestOnly then + latestVersion + else + "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." ] [ "-" ] v) versions); }; }).overrideAttrs (_: { diff --git a/nix/ext/rum.nix b/nix/ext/rum.nix index 6a9492e38..a16be47c9 100644 --- a/nix/ext/rum.nix +++ b/nix/ext/rum.nix @@ -4,6 +4,7 @@ fetchFromGitHub, postgresql, buildEnv, + latestOnly ? false, }: let pname = "rum"; @@ -19,9 +20,15 @@ let # Derived version information versions = lib.naturalSort (lib.attrNames supportedVersions); latestVersion = lib.last versions; - numberOfVersions = builtins.length versions; + versionsToUse = + if latestOnly then + { "${latestVersion}" = supportedVersions.${latestVersion}; } + else + supportedVersions; + versionsBuilt = if latestOnly then [ latestVersion ] else versions; + numberOfVersionsBuilt = builtins.length versionsBuilt; packages = builtins.attrValues ( - lib.mapAttrs (name: value: build name value.hash value.revision) supportedVersions + lib.mapAttrs (name: value: build name value.hash value.revision) versionsToUse ); # Build function for individual versions @@ -82,7 +89,7 @@ buildEnv { postBuild = '' # Verify all expected library files are present - expectedFiles=${toString (numberOfVersions + 1)} + expectedFiles=${toString (numberOfVersionsBuilt + 1)} actualFiles=$(ls -l $out/lib/${pname}*${postgresql.dlSuffix} | wc -l) if [[ "$actualFiles" != "$expectedFiles" ]]; then @@ -94,8 +101,13 @@ buildEnv { ''; passthru = { - inherit versions numberOfVersions pname; + versions = versionsBuilt; + numberOfVersions = numberOfVersionsBuilt; + inherit pname latestOnly; version = - "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." 
] [ "-" ] v) versions); + if latestOnly then + latestVersion + else + "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." ] [ "-" ] v) versions); }; } diff --git a/nix/ext/timescaledb.nix b/nix/ext/timescaledb.nix index 765ca5267..dbfb2a836 100644 --- a/nix/ext/timescaledb.nix +++ b/nix/ext/timescaledb.nix @@ -11,6 +11,7 @@ switch-ext-version, coreutils, writeShellApplication, + latestOnly ? false, }: let @@ -97,9 +98,15 @@ let ) allVersions; versions = lib.naturalSort (lib.attrNames supportedVersions); latestVersion = lib.last versions; - numberOfVersions = builtins.length versions; + versionsToUse = + if latestOnly then + { "${latestVersion}" = supportedVersions.${latestVersion}; } + else + supportedVersions; + versionsBuilt = if latestOnly then [ latestVersion ] else versions; + numberOfVersionsBuilt = builtins.length versionsBuilt; packages = builtins.attrValues ( - lib.mapAttrs (name: value: build name value.hash (value.revision or name)) supportedVersions + lib.mapAttrs (name: value: build name value.hash (value.revision or name)) versionsToUse ); switch-timescaledb-loader = writeShellApplication { name = "switch_timescaledb_loader"; @@ -145,13 +152,18 @@ buildEnv { ]; passthru = { - inherit versions numberOfVersions switch-ext-version; + versions = versionsBuilt; + numberOfVersions = numberOfVersionsBuilt; + inherit switch-ext-version latestOnly; hasBackgroundWorker = true; libName = "timescaledb-loader"; defaultSettings = { shared_preload_libraries = [ "timescaledb" ]; }; version = - "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." ] [ "-" ] v) versions); + if latestOnly then + latestVersion + else + "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." ] [ "-" ] v) versions); }; } diff --git a/nix/ext/vault.nix b/nix/ext/vault.nix index 537febc0b..c48546ecb 100644 --- a/nix/ext/vault.nix +++ b/nix/ext/vault.nix @@ -5,6 +5,7 @@ fetchFromGitHub, libsodium, postgresql, + latestOnly ? false, }: let pname = "supabase_vault"; @@ -20,10 +21,14 @@ let # Derived version information versions = lib.naturalSort (lib.attrNames supportedVersions); latestVersion = lib.last versions; - numberOfVersions = builtins.length versions; - packages = builtins.attrValues ( - lib.mapAttrs (name: value: build name value.hash) supportedVersions - ); + versionsToUse = + if latestOnly then + { "${latestVersion}" = supportedVersions.${latestVersion}; } + else + supportedVersions; + versionsBuilt = if latestOnly then [ latestVersion ] else versions; + numberOfVersionsBuilt = builtins.length versionsBuilt; + packages = builtins.attrValues (lib.mapAttrs (name: value: build name value.hash) versionsToUse); # Build function for individual pgsodium versions build = @@ -86,9 +91,14 @@ pkgs.buildEnv { ]; passthru = { - inherit versions numberOfVersions pname; + versions = versionsBuilt; + numberOfVersions = numberOfVersionsBuilt; + inherit pname latestOnly; version = - "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." ] [ "-" ] v) versions); + if latestOnly then + latestVersion + else + "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." ] [ "-" ] v) versions); pgRegressTestName = "vault"; }; } diff --git a/nix/ext/wal2json.nix b/nix/ext/wal2json.nix index 43ddab8df..f1ccd273b 100644 --- a/nix/ext/wal2json.nix +++ b/nix/ext/wal2json.nix @@ -5,6 +5,7 @@ fetchFromGitHub, postgresql, makeWrapper, + latestOnly ? 
false, }: let @@ -56,9 +57,15 @@ let ) allVersions; versions = lib.naturalSort (lib.attrNames supportedVersions); latestVersion = lib.last versions; - numberOfVersions = builtins.length versions; + versionsToUse = + if latestOnly then + { "${latestVersion}" = supportedVersions.${latestVersion}; } + else + supportedVersions; + versionsBuilt = if latestOnly then [ latestVersion ] else versions; + numberOfVersionsBuilt = builtins.length versionsBuilt; packages = builtins.attrValues ( - lib.mapAttrs (name: value: build name value.rev value.hash) supportedVersions + lib.mapAttrs (name: value: build name value.rev value.hash) versionsToUse ); in pkgs.buildEnv { @@ -89,15 +96,20 @@ pkgs.buildEnv { # checks (set -x test "$(ls -A $out/lib/${pname}*${postgresql.dlSuffix} | wc -l)" = "${ - toString (numberOfVersions + 1) + toString (numberOfVersionsBuilt + 1) }" ) ''; passthru = { - inherit versions numberOfVersions pname; + versions = versionsBuilt; + numberOfVersions = numberOfVersionsBuilt; + inherit pname latestOnly; version = - "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." ] [ "-" ] v) versions); + if latestOnly then + latestVersion + else + "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." ] [ "-" ] v) versions); defaultSettings = { wal_level = "logical"; }; diff --git a/nix/ext/wrappers/default.nix b/nix/ext/wrappers/default.nix index 696358ea4..5383e30ec 100644 --- a/nix/ext/wrappers/default.nix +++ b/nix/ext/wrappers/default.nix @@ -9,6 +9,7 @@ buildEnv, rust-bin, git, + latestOnly ? false, }: let pname = "wrappers"; @@ -207,6 +208,13 @@ let versions = lib.naturalSort (lib.attrNames supportedVersions); latestVersion = lib.last versions; numberOfVersions = builtins.length versions; + versionsToUse = + if latestOnly then + lib.filterAttrs (n: _: n == latestVersion) supportedVersions + else + supportedVersions; + versionsBuilt = if latestOnly then [ latestVersion ] else versions; + numberOfVersionsBuilt = builtins.length versionsBuilt; # Filter out previously packaged versions that are actually built for this PG version # This prevents double-counting when a version appears in both lists previouslyPackagedVersions = builtins.filter ( @@ -216,7 +224,7 @@ let packagesAttrSet = lib.mapAttrs' (name: value: { name = lib.replaceStrings [ "." 
] [ "_" ] name; value = build name value.hash value.rust value.pgrx; - }) supportedVersions; + }) versionsToUse; packages = builtins.attrValues packagesAttrSet; in (buildEnv { @@ -240,69 +248,101 @@ in # Create main library symlink to latest version ln -sfn ${pname}-${latestVersion}${postgresql.dlSuffix} $out/lib/${pname}${postgresql.dlSuffix} - # Create symlinks for all previously packaged versions to main library - for v in ${lib.concatStringsSep " " previouslyPackagedVersions}; do - ln -sfn $out/lib/${pname}${postgresql.dlSuffix} $out/lib/${pname}-$v${postgresql.dlSuffix} - done + ${ + if latestOnly then + '' + # latestOnly mode: skip previouslyPackagedVersions symlinks + '' + else + '' + # Create symlinks for all previously packaged versions to main library + for v in ${lib.concatStringsSep " " previouslyPackagedVersions}; do + ln -sfn $out/lib/${pname}${postgresql.dlSuffix} $out/lib/${pname}-$v${postgresql.dlSuffix} + done + '' + } } - create_migration_sql_files() { + ${ + if latestOnly then + '' + # latestOnly mode: skip migration SQL files entirely + '' + else + '' + create_migration_sql_files() { - PREVIOUS_VERSION="" - while IFS= read -r i; do - FILENAME=$(basename "$i") - VERSION="$(grep -oE '[0-9]+\.[0-9]+\.[0-9]+' <<< $FILENAME)" - if [[ "$PREVIOUS_VERSION" != "" ]]; then - # Always write to $out/share/postgresql/extension, not $DIRNAME - # because $DIRNAME might be a symlinked read-only path from the Nix store - # We use -L with cp to dereference symlinks (copy the actual file content, not the symlink) - MIGRATION_FILENAME="$out/share/postgresql/extension/''${FILENAME/$VERSION/$PREVIOUS_VERSION--$VERSION}" - cp -L "$i" "$MIGRATION_FILENAME" - fi - PREVIOUS_VERSION="$VERSION" - done < <(find $out -name '*.sql' | sort -V) + PREVIOUS_VERSION="" + while IFS= read -r i; do + FILENAME=$(basename "$i") + VERSION="$(grep -oE '[0-9]+\.[0-9]+\.[0-9]+' <<< $FILENAME)" + if [[ "$PREVIOUS_VERSION" != "" ]]; then + # Always write to $out/share/postgresql/extension, not $DIRNAME + # because $DIRNAME might be a symlinked read-only path from the Nix store + # We use -L with cp to dereference symlinks (copy the actual file content, not the symlink) + MIGRATION_FILENAME="$out/share/postgresql/extension/''${FILENAME/$VERSION/$PREVIOUS_VERSION--$VERSION}" + cp -L "$i" "$MIGRATION_FILENAME" + fi + PREVIOUS_VERSION="$VERSION" + done < <(find $out -name '*.sql' | sort -V) - # Create empty SQL files for previously packaged versions that don't exist - # This compensates for versions that failed to produce SQL files in the past - for prev_version in ${lib.concatStringsSep " " previouslyPackagedVersions}; do - sql_file="$out/share/postgresql/extension/wrappers--$prev_version.sql" - if [ ! -f "$sql_file" ]; then - echo "-- Empty migration file for previously packaged version $prev_version" > "$sql_file" - fi - done + # Create empty SQL files for previously packaged versions that don't exist + # This compensates for versions that failed to produce SQL files in the past + for prev_version in ${lib.concatStringsSep " " previouslyPackagedVersions}; do + sql_file="$out/share/postgresql/extension/wrappers--$prev_version.sql" + if [ ! 
-f "$sql_file" ]; then + echo "-- Empty migration file for previously packaged version $prev_version" > "$sql_file" + fi + done - # Create migration SQL files from previous versions to newer versions - # Skip if the migration file already exists (to avoid conflicts with the first loop) - for prev_version in ${lib.concatStringsSep " " previouslyPackagedVersions}; do - for curr_version in ${lib.concatStringsSep " " versions}; do - if [[ "$(printf '%s\n%s' "$prev_version" "$curr_version" | sort -V | head -n1)" == "$prev_version" ]] && [[ "$prev_version" != "$curr_version" ]]; then - main_sql_file="$out/share/postgresql/extension/wrappers--$curr_version.sql" - new_file="$out/share/postgresql/extension/wrappers--$prev_version--$curr_version.sql" - # Only create if it doesn't already exist (first loop may have created it) - if [ -f "$main_sql_file" ] && [ ! -f "$new_file" ]; then - cp "$main_sql_file" "$new_file" - sed -i 's|$libdir/wrappers-[0-9.]*|$libdir/wrappers|g' "$new_file" - fi - fi - done - done + # Create migration SQL files from previous versions to newer versions + # Skip if the migration file already exists (to avoid conflicts with the first loop) + for prev_version in ${lib.concatStringsSep " " previouslyPackagedVersions}; do + for curr_version in ${lib.concatStringsSep " " versions}; do + if [[ "$(printf '%s\n%s' "$prev_version" "$curr_version" | sort -V | head -n1)" == "$prev_version" ]] && [[ "$prev_version" != "$curr_version" ]]; then + main_sql_file="$out/share/postgresql/extension/wrappers--$curr_version.sql" + new_file="$out/share/postgresql/extension/wrappers--$prev_version--$curr_version.sql" + # Only create if it doesn't already exist (first loop may have created it) + if [ -f "$main_sql_file" ] && [ ! -f "$new_file" ]; then + cp "$main_sql_file" "$new_file" + sed -i 's|$libdir/wrappers-[0-9.]*|$libdir/wrappers|g' "$new_file" + fi + fi + done + done + } + '' } create_control_files create_lib_files - create_migration_sql_files + ${if latestOnly then "" else "create_migration_sql_files"} # Verify library count matches expected - (test "$(ls -A $out/lib/${pname}*${postgresql.dlSuffix} | wc -l)" = "${ - toString (numberOfVersions + numberOfPreviouslyPackagedVersions + 1) - }") + ${ + if latestOnly then + '' + (test "$(ls -A $out/lib/${pname}*${postgresql.dlSuffix} | wc -l)" = "2") + '' + else + '' + (test "$(ls -A $out/lib/${pname}*${postgresql.dlSuffix} | wc -l)" = "${ + toString (numberOfVersions + numberOfPreviouslyPackagedVersions + 1) + }") + '' + } ''; passthru = { - inherit versions numberOfVersions; + versions = versionsBuilt; + numberOfVersions = numberOfVersionsBuilt; pname = "${pname}"; + inherit latestOnly; version = - "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." ] [ "-" ] v) versions); + if latestOnly then + latestVersion + else + "multi-" + lib.concatStringsSep "-" (map (v: lib.replaceStrings [ "." 
] [ "-" ] v) versions); # Expose individual packages for CI to build separately packages = packagesAttrSet // { recurseForDerivations = true; diff --git a/nix/packages/cli-smoke-test.nix b/nix/packages/cli-smoke-test.nix new file mode 100644 index 000000000..334ab002e --- /dev/null +++ b/nix/packages/cli-smoke-test.nix @@ -0,0 +1,222 @@ +{ + writeShellApplication, + coreutils, + gnused, + supabase-cli, + yq, + postgresql_15, +}: +writeShellApplication { + name = "cli-smoke-test"; + runtimeInputs = [ + coreutils + gnused + supabase-cli + yq + postgresql_15 + ]; + text = '' + # CLI Smoke Test - Tests Supabase CLI with locally built Docker images + # + # Usage: + # nix run .#cli-smoke-test -- 17 + # nix run .#cli-smoke-test -- --no-build 15 + # nix run .#cli-smoke-test -- --debug 17 # Full debug output (local only) + + set -euo pipefail + + REPO_ROOT="$(pwd)" + PG_VERSION="" + SKIP_BUILD=false + DEBUG_MODE=false + WORK_DIR="" + + RED='\033[0;31m' + GREEN='\033[0;32m' + YELLOW='\033[1;33m' + NC='\033[0m' + + log_info() { echo -e "''${GREEN}[INFO]''${NC} $1"; } + log_warn() { echo -e "''${YELLOW}[WARN]''${NC} $1"; } + log_error() { echo -e "''${RED}[ERROR]''${NC} $1"; } + + print_help() { + cat << 'EOF' + Usage: nix run .#cli-smoke-test -- [OPTIONS] PG_VERSION + + Run Supabase CLI smoke tests with a locally built PostgreSQL Docker image. + + Arguments: + PG_VERSION PostgreSQL version to test (15 or 17) + + Options: + -h, --help Show this help message + --no-build Skip building the image (use existing supabase/postgres:) + --debug Enable debug output (includes credentials - local use only!) + + Examples: + nix run .#cli-smoke-test -- 17 + nix run .#cli-smoke-test -- 15 + nix run .#cli-smoke-test -- --no-build 17 + nix run .#cli-smoke-test -- --debug 17 + EOF + } + + cleanup() { + local exit_code=$? + log_info "Cleaning up..." + supabase stop --no-backup 2>/dev/null || true + if [[ -n "$WORK_DIR" ]] && [[ -d "$WORK_DIR" ]]; then + rm -rf "$WORK_DIR" + fi + exit $exit_code + } + + trap cleanup EXIT + + main() { + while [[ $# -gt 0 ]]; do + case "$1" in + -h|--help) print_help; exit 0 ;; + --no-build) SKIP_BUILD=true; shift ;; + --debug) DEBUG_MODE=true; shift ;; + -*) log_error "Unknown option: $1"; print_help; exit 1 ;; + *) PG_VERSION="$1"; shift ;; + esac + done + + if [[ -z "$PG_VERSION" ]]; then + log_error "PostgreSQL version required (15 or 17)" + print_help + exit 1 + fi + + if [[ "$PG_VERSION" != "15" && "$PG_VERSION" != "17" ]]; then + log_error "Invalid PostgreSQL version: $PG_VERSION (must be 15 or 17)" + exit 1 + fi + + DOCKERFILE="Dockerfile-$PG_VERSION" + # CLI uses public.ecr.aws/supabase/postgres as base image + IMAGE_NAME="public.ecr.aws/supabase/postgres:$PG_VERSION" + + if [[ ! -f "$REPO_ROOT/$DOCKERFILE" ]]; then + log_error "Dockerfile not found: $REPO_ROOT/$DOCKERFILE" + log_error "Make sure you're running from the postgres repository root" + exit 1 + fi + + if [[ ! -f "$REPO_ROOT/ansible/vars.yml" ]]; then + log_error "ansible/vars.yml not found" + log_error "Make sure you're running from the postgres repository root" + exit 1 + fi + + log_info "CLI Smoke Test for PostgreSQL $PG_VERSION" + + # Build Docker image + if [[ "$SKIP_BUILD" != "true" ]]; then + log_info "Building Docker image from $DOCKERFILE..." + if ! docker build -f "$REPO_ROOT/$DOCKERFILE" -t "$IMAGE_NAME" "$REPO_ROOT"; then + log_error "Failed to build Docker image" + exit 1 + fi + else + log_info "Skipping build (--no-build)" + if ! 
docker image inspect "$IMAGE_NAME" &>/dev/null; then + log_error "Image $IMAGE_NAME not found. Run without --no-build first." + exit 1 + fi + fi + + # Get component versions from ansible/vars.yml + log_info "Reading component versions from ansible/vars.yml..." + REST_VERSION=$(yq -r '.postgrest_release' "$REPO_ROOT/ansible/vars.yml") + AUTH_VERSION=$(yq -r '.gotrue_release' "$REPO_ROOT/ansible/vars.yml") + PG_RELEASE=$(yq -r ".postgres_release[\"postgres$PG_VERSION\"]" "$REPO_ROOT/ansible/vars.yml") + + log_info " PostgREST: $REST_VERSION" + log_info " GoTrue: $AUTH_VERSION" + log_info " Postgres: $PG_RELEASE" + + # Create working directory + WORK_DIR=$(mktemp -d) + log_info "Working directory: $WORK_DIR" + cd "$WORK_DIR" + + # Prepare Supabase CLI config + mkdir -p supabase/.temp + + # Set component versions - CLI reads these to determine which images to use + echo "v$REST_VERSION" > supabase/.temp/rest-version + echo "v$AUTH_VERSION" > supabase/.temp/gotrue-version + # Use major version so CLI constructs supabase/postgres:$PG_VERSION (our local build) + echo "$PG_VERSION" > supabase/.temp/postgres-version + + cat > supabase/config.toml << EOF + [db] + major_version = $PG_VERSION + EOF + + log_info "Starting Supabase..." + if [[ "$DEBUG_MODE" == "true" ]]; then + # Debug mode: full output including credentials (local use only) + if ! supabase start --debug; then + log_error "Failed to start Supabase" + exit 1 + fi + else + # CI mode: redact credentials from output + SUPABASE_OUTPUT=$(mktemp) + SUPABASE_EXIT=0 + supabase start > "$SUPABASE_OUTPUT" 2>&1 || SUPABASE_EXIT=$? + + # Redact sensitive information before displaying + sed -E \ + -e 's/(Secret[[:space:]]*\│[[:space:]]*)[^│]*/\1[REDACTED]/g' \ + -e 's/(Publishable[[:space:]]*\│[[:space:]]*)[^│]*/\1[REDACTED]/g' \ + -e 's/(Access Key[[:space:]]*\│[[:space:]]*)[^│]*/\1[REDACTED]/g' \ + -e 's/(Secret Key[[:space:]]*\│[[:space:]]*)[^│]*/\1[REDACTED]/g' \ + -e 's/postgres:postgres@/postgres:[REDACTED]@/g' \ + -e 's/sb_secret_[A-Za-z0-9_-]*/sb_secret_[REDACTED]/g' \ + -e 's/sb_publishable_[A-Za-z0-9_-]*/sb_publishable_[REDACTED]/g' \ + -e 's/"Data":"[^"]*"/"Data":"[REDACTED]"/g' \ + -e 's/"SecretKey":[0-9]*/"SecretKey":[REDACTED]/g' \ + -e 's/[a-f0-9]{32,64}/[REDACTED]/g' \ + "$SUPABASE_OUTPUT" + + rm -f "$SUPABASE_OUTPUT" + + if [[ $SUPABASE_EXIT -ne 0 ]]; then + log_error "Failed to start Supabase" + exit 1 + fi + fi + + log_info "Verifying database connection..." + if ! PGPASSWORD=postgres psql -h localhost -p 54322 -U postgres -d postgres -c "SELECT version();" ; then + log_error "Failed to connect to database" + exit 1 + fi + + log_info "Running health checks..." 
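+      # The heredoc below runs all health checks in one psql session. For
+      # ad-hoc debugging, the same connection works standalone, e.g.:
+      #   PGPASSWORD=postgres psql -h localhost -p 54322 -U postgres -d postgres \
+      #     -c "SELECT extname, extversion FROM pg_extension ORDER BY extname;"
+      # (54322 is the Supabase CLI's default local database port.)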
+ PGPASSWORD=postgres psql -h localhost -p 54322 -U postgres -d postgres << 'EOSQL' + -- Check extensions schema exists + SELECT EXISTS(SELECT 1 FROM pg_namespace WHERE nspname = 'extensions'); + + -- Check some key extensions + SELECT extname, extversion FROM pg_extension WHERE extname IN ('uuid-ossp', 'pgcrypto', 'pgjwt') ORDER BY extname; + + -- Basic table creation test + CREATE TABLE IF NOT EXISTS smoke_test (id serial primary key, created_at timestamptz default now()); + INSERT INTO smoke_test DEFAULT VALUES; + SELECT * FROM smoke_test; + DROP TABLE smoke_test; + EOSQL + + log_info "''${GREEN}CLI Smoke Test PASSED for PostgreSQL $PG_VERSION''${NC}" + } + + main "$@" + ''; +} diff --git a/nix/packages/default.nix b/nix/packages/default.nix index 2c63f2223..689ff691d 100644 --- a/nix/packages/default.nix +++ b/nix/packages/default.nix @@ -23,6 +23,9 @@ inherit (pkgs) lib; inherit inputs; }; + pg-startup-profiler-pkgs = pkgs.callPackage ./pg-startup-profiler.nix { + inherit (pkgs) lib; + }; pkgs-lib = pkgs.callPackage ./lib.nix { psql_15 = self'.packages."psql_15/bin"; psql_17 = self'.packages."psql_17/bin"; @@ -37,6 +40,12 @@ build-test-ami = pkgs.callPackage ./build-test-ami.nix { packer = self'.packages.packer; }; cleanup-ami = pkgs.callPackage ./cleanup-ami.nix { }; dbmate-tool = pkgs.callPackage ./dbmate-tool.nix { inherit (self.supabase) defaults; }; + docker-image-inputs = pkgs.callPackage ./docker-image-inputs.nix { + psql_15_slim = self'.packages."psql_15_slim/bin"; + psql_17_slim = self'.packages."psql_17_slim/bin"; + psql_orioledb-17_slim = self'.packages."psql_orioledb-17_slim/bin"; + supabase-groonga = self'.packages.supabase-groonga; + }; docs = pkgs.callPackage ./docs.nix { }; pgbouncer = pkgs.callPackage ../pgbouncer.nix { }; github-matrix = pkgs.callPackage ./github-matrix { @@ -45,6 +54,7 @@ gatekeeper = pkgs.callPackage ./gatekeeper.nix { inherit inputs pkgs; }; supabase-groonga = pkgs.callPackage ../ext/pgroonga/groonga.nix { }; http-mock-server = pkgs.callPackage ./http-mock-server.nix { }; + image-size-analyzer = pkgs.callPackage ./image-size-analyzer.nix { }; local-infra-bootstrap = pkgs.callPackage ./local-infra-bootstrap.nix { }; mecab-naist-jdic = pkgs.callPackage ./mecab-naist-jdic.nix { }; migrate-tool = pkgs.callPackage ./migrate-tool.nix { psql_15 = self'.packages."psql_15/bin"; }; @@ -77,8 +87,21 @@ sync-exts-versions = pkgs.callPackage ./sync-exts-versions.nix { inherit (inputs') nix-editor; }; trigger-nix-build = pkgs.callPackage ./trigger-nix-build.nix { }; update-readme = pkgs.callPackage ./update-readme.nix { }; + supabase-cli = pkgs.callPackage ./supabase-cli.nix { }; + docker-image-test = pkgs.callPackage ./docker-image-test.nix { + psql_15 = self'.packages."psql_15/bin"; + psql_17 = self'.packages."psql_17/bin"; + psql_orioledb-17 = self'.packages."psql_orioledb-17/bin"; + inherit (self'.packages) pg_regress; + }; + cli-smoke-test = pkgs.callPackage ./cli-smoke-test.nix { + inherit (self'.packages) supabase-cli; + inherit (pkgs) yq; + postgresql_15 = self'.packages."postgresql_15"; + }; inherit (pkgs.callPackage ./wal-g.nix { }) wal-g-2; inherit (supascan-pkgs) goss supascan supascan-specs; + inherit (pg-startup-profiler-pkgs) pg-startup-profiler; inherit (pkgs.cargo-pgrx) cargo-pgrx_0_11_3 cargo-pgrx_0_12_6 diff --git a/nix/packages/docker-image-inputs.nix b/nix/packages/docker-image-inputs.nix new file mode 100644 index 000000000..76784a8c1 --- /dev/null +++ b/nix/packages/docker-image-inputs.nix @@ -0,0 +1,166 @@ +{ + lib, + stdenv, + 
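# Helpers for the hash CLI below: the wrapper script, the manifest file, and JSON output +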
writeShellApplication, + writeText, + jq, + # Slim packages used in Docker images + psql_15_slim, + psql_17_slim, + psql_orioledb-17_slim, + # Groonga is also installed in images + supabase-groonga, +}: + +let + root = ../..; + + # Bundle all source files that are copied into Docker images + dockerSources = stdenv.mkDerivation { + name = "docker-image-sources"; + src = lib.fileset.toSource { + inherit root; + fileset = lib.fileset.unions [ + # Dockerfiles + (root + "/Dockerfile-15") + (root + "/Dockerfile-17") + (root + "/Dockerfile-orioledb-17") + + # PostgreSQL configuration files (copied into images) + (root + "/ansible/files/postgresql_config") + (root + "/ansible/files/pgbouncer_config") + (root + "/ansible/files/stat_extension.sql") + (root + "/ansible/files/pgsodium_getkey_urandom.sh.j2") + (root + "/ansible/files/postgresql_extension_custom_scripts") + (root + "/ansible/files/walg_helper_scripts") + + # Database migrations (copied into images) + (root + "/migrations/db") + ]; + }; + + phases = [ + "unpackPhase" + "installPhase" + ]; + installPhase = '' + mkdir -p $out + cp -r . $out/ + ''; + }; + + # Create a manifest of all package store paths + # This ensures the hash changes when any package changes + packageManifest = writeText "docker-image-packages-manifest" '' + # Slim PostgreSQL packages installed in Docker images + psql_15_slim=${psql_15_slim} + psql_17_slim=${psql_17_slim} + psql_orioledb-17_slim=${psql_orioledb-17_slim} + + # Groonga (installed in all images) + supabase-groonga=${supabase-groonga} + ''; + + # Combined derivation that depends on both sources and packages + dockerImageInputs = stdenv.mkDerivation { + name = "docker-image-inputs"; + + # No source needed - we just create a manifest + dontUnpack = true; + + # These are the actual dependencies that affect the hash + buildInputs = [ + dockerSources + psql_15_slim + psql_17_slim + psql_orioledb-17_slim + supabase-groonga + ]; + + installPhase = '' + mkdir -p $out + + # Include source files reference + echo "sources=${dockerSources}" > $out/manifest + + # Include package manifest + cat ${packageManifest} >> $out/manifest + + # Create a combined hash from all inputs + echo "" >> $out/manifest + echo "# Combined input paths:" >> $out/manifest + echo "${dockerSources}" >> $out/manifest + echo "${psql_15_slim}" >> $out/manifest + echo "${psql_17_slim}" >> $out/manifest + echo "${psql_orioledb-17_slim}" >> $out/manifest + echo "${supabase-groonga}" >> $out/manifest + ''; + }; +in +writeShellApplication { + name = "docker-image-inputs-hash"; + + runtimeInputs = [ jq ]; + + text = '' + set -euo pipefail + + DOCKER_INPUTS="${dockerImageInputs}" + INPUT_HASH=$(basename "$DOCKER_INPUTS" | cut -d- -f1) + + case "''${1:-hash}" in + hash) + echo "$INPUT_HASH" + ;; + path) + echo "$DOCKER_INPUTS" + ;; + manifest) + cat "$DOCKER_INPUTS/manifest" + ;; + json) + jq -n \ + --arg hash "$INPUT_HASH" \ + --arg path "$DOCKER_INPUTS" \ + --arg sources "${dockerSources}" \ + --arg psql_15_slim "${psql_15_slim}" \ + --arg psql_17_slim "${psql_17_slim}" \ + --arg psql_orioledb_17_slim "${psql_orioledb-17_slim}" \ + --arg supabase_groonga "${supabase-groonga}" \ + '{ + hash: $hash, + path: $path, + sources: $sources, + packages: { + psql_15_slim: $psql_15_slim, + psql_17_slim: $psql_17_slim, + "psql_orioledb-17_slim": $psql_orioledb_17_slim, + "supabase-groonga": $supabase_groonga + } + }' + ;; + *) + echo "Usage: docker-image-inputs-hash [hash|path|manifest|json]" >&2 + exit 1 + ;; + esac + ''; + + meta = { + description = "Get the 
content hash of all Docker image inputs"; + longDescription = '' + This package tracks all inputs that affect Docker image builds: + - Source files: Dockerfiles, configs, migrations + - Nix packages: psql_*_slim, supabase-groonga + + The hash changes when ANY of these change, including transitive + dependencies of the Nix packages. + + Usage: + docker-image-inputs-hash hash # Get just the hash + docker-image-inputs-hash path # Get the Nix store path + docker-image-inputs-hash manifest # Show all tracked inputs + docker-image-inputs-hash json # Get detailed JSON output + ''; + }; +} diff --git a/nix/packages/docker-image-test.nix b/nix/packages/docker-image-test.nix new file mode 100644 index 000000000..97b3124f3 --- /dev/null +++ b/nix/packages/docker-image-test.nix @@ -0,0 +1,391 @@ +{ + writeShellApplication, + coreutils, + gnused, + python3, + psql_15, + psql_17, + psql_orioledb-17, + pg_regress, +}: +writeShellApplication { + name = "docker-image-test"; + runtimeInputs = [ + coreutils + gnused + python3 + ]; + text = '' + # Test a PostgreSQL Docker image against the pg_regress test suite + # + # Usage: + # nix run .#docker-image-test -- Dockerfile-17 + # nix run .#docker-image-test -- --no-build Dockerfile-15 + + set -euo pipefail + + # Find repo root (where Dockerfiles live) + REPO_ROOT="$(pwd)" + TESTS_DIR="$REPO_ROOT/nix/tests" + TESTS_SQL_DIR="$TESTS_DIR/sql" + HTTP_MOCK_SERVER="$TESTS_DIR/http-mock-server.py" + CONTAINER_NAME="" + IMAGE_TAG="" + POSTGRES_USER="supabase_admin" + POSTGRES_DB="postgres" + POSTGRES_PASSWORD="postgres" + OUTPUT_DIR="" + HTTP_MOCK_PORT="" + HTTP_MOCK_PID="" + KEEP_CONTAINER=false + + # Colors for output + RED='\033[0;31m' + GREEN='\033[0;32m' + YELLOW='\033[1;33m' + NC='\033[0m' + + log_info() { echo -e "''${GREEN}[INFO]''${NC} $1"; } + log_warn() { echo -e "''${YELLOW}[WARN]''${NC} $1"; } + log_error() { echo -e "''${RED}[ERROR]''${NC} $1"; } + + print_help() { + cat << 'EOF' + Usage: nix run .#docker-image-test -- [OPTIONS] DOCKERFILE + + Test a PostgreSQL Docker image against the pg_regress test suite. 
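+    Builds the image (unless --no-build is given), starts a container,
+    enables extensions via nix/tests/prime.sql, then runs the pg_regress
+    suite against it over localhost.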
+ + Arguments: + DOCKERFILE The Dockerfile to build and test (e.g., Dockerfile-17) + + Options: + -h, --help Show this help message + --no-build Skip building the image (use existing) + --keep Keep the container running after tests (for debugging) + + Examples: + nix run .#docker-image-test -- Dockerfile-17 + nix run .#docker-image-test -- Dockerfile-15 + nix run .#docker-image-test -- Dockerfile-orioledb-17 + nix run .#docker-image-test -- --no-build Dockerfile-17 + EOF + } + + get_version_info() { + local dockerfile="$1" + case "$dockerfile" in + Dockerfile-15) echo "15 5436" ;; + Dockerfile-17) echo "17 5435" ;; + Dockerfile-orioledb-17) echo "orioledb-17 5437" ;; + *) + log_error "Unknown Dockerfile: $dockerfile" + log_error "Supported: Dockerfile-15, Dockerfile-17, Dockerfile-orioledb-17" + exit 1 + ;; + esac + } + + # Tests to skip for OrioleDB + ORIOLEDB_SKIP_TESTS=( + "index_advisor" + ) + + get_test_list() { + local version="$1" + local tests=() + + # Build list of OrioleDB-specific test basenames + local orioledb_variants=() + for f in "$TESTS_SQL_DIR"/z_orioledb-17_*.sql; do + if [[ -f "$f" ]]; then + local variant_name + variant_name=$(basename "$f" .sql) + local base_name="''${variant_name#z_orioledb-17_}" + orioledb_variants+=("$base_name") + fi + done + + for f in "$TESTS_SQL_DIR"/*.sql; do + local _basename + _basename=$(basename "$f" .sql) + + if [[ "$version" == "orioledb-17" ]]; then + local should_skip=false + for skip_test in "''${ORIOLEDB_SKIP_TESTS[@]}"; do + if [[ "$_basename" == "$skip_test" ]]; then + should_skip=true + break + fi + done + if [[ "$should_skip" == "true" ]]; then + continue + fi + fi + + if [[ "$_basename" == z_* ]]; then + case "$version" in + 15) [[ "$_basename" == z_15_* ]] && tests+=("$_basename") ;; + 17) [[ "$_basename" == z_17_* ]] && tests+=("$_basename") ;; + orioledb-17) [[ "$_basename" == z_orioledb-17_* ]] && tests+=("$_basename") ;; + esac + else + if [[ "$version" == "orioledb-17" ]]; then + local has_variant=false + for variant in "''${orioledb_variants[@]}"; do + if [[ "$_basename" == "$variant" ]]; then + has_variant=true + break + fi + done + if [[ "$has_variant" == "false" ]]; then + tests+=("$_basename") + fi + else + tests+=("$_basename") + fi + fi + done + + printf '%s\n' "''${tests[@]}" | sort + } + + cleanup() { + local exit_code=$? + + if [[ -n "$HTTP_MOCK_PID" ]]; then + kill "$HTTP_MOCK_PID" 2>/dev/null || true + fi + + if [[ -n "$CONTAINER_NAME" ]] && [[ "$KEEP_CONTAINER" != "true" ]]; then + log_info "Cleaning up container $CONTAINER_NAME..." + docker rm -f "$CONTAINER_NAME" 2>/dev/null || true + fi + + if [[ -n "$OUTPUT_DIR" ]] && [[ -d "$OUTPUT_DIR" ]]; then + if [[ $exit_code -ne 0 ]]; then + log_info "Test output preserved at: $OUTPUT_DIR" + else + rm -rf "$OUTPUT_DIR" + fi + fi + + exit $exit_code + } + + trap cleanup EXIT + + wait_for_postgres() { + local host="$1" + local port="$2" + local max_attempts=60 + local attempt=1 + + log_info "Waiting for PostgreSQL to be ready..." 
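+      # Poll pg_isready once per second; max_attempts=60 bounds startup at
+      # roughly one minute before we give up and the caller dumps the
+      # container logs. Equivalent manual check while debugging (port is
+      # per Dockerfile, see get_version_info):
+      #   pg_isready -h localhost -p 5435 -U supabase_admin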
+ + while [[ $attempt -le $max_attempts ]]; do + if "$PG_ISREADY_PATH" -h "$host" -p "$port" -U "$POSTGRES_USER" -q 2>/dev/null; then + log_info "PostgreSQL is ready" + return 0 + fi + sleep 1 + ((attempt++)) + done + + log_error "PostgreSQL failed to start after ''${max_attempts}s" + return 1 + } + + main() { + local dockerfile="" + local skip_build=false + + while [[ $# -gt 0 ]]; do + case "$1" in + -h|--help) print_help; exit 0 ;; + --no-build) skip_build=true; shift ;; + --keep) KEEP_CONTAINER=true; shift ;; + -*) log_error "Unknown option: $1"; print_help; exit 1 ;; + *) dockerfile="$1"; shift ;; + esac + done + + if [[ -z "$dockerfile" ]]; then + log_error "Dockerfile argument required" + print_help + exit 1 + fi + + if [[ ! -f "$REPO_ROOT/$dockerfile" ]]; then + log_error "Dockerfile not found: $REPO_ROOT/$dockerfile" + exit 1 + fi + + read -r VERSION PORT <<< "$(get_version_info "$dockerfile")" + + IMAGE_TAG="pg-docker-test:''${VERSION}" + CONTAINER_NAME="pg-test-''${VERSION}-$$" + OUTPUT_DIR=$(mktemp -d) + + log_info "Testing $dockerfile (version: $VERSION, port: $PORT)" + + if [[ "$skip_build" != "true" ]]; then + log_info "Building image from $dockerfile..." + if ! docker build -f "$REPO_ROOT/$dockerfile" -t "$IMAGE_TAG" "$REPO_ROOT"; then + log_error "Failed to build image" + exit 1 + fi + else + log_info "Skipping build (--no-build)" + if ! docker image inspect "$IMAGE_TAG" &>/dev/null; then + log_error "Image $IMAGE_TAG not found. Run without --no-build first." + exit 1 + fi + fi + + # Set paths based on version + case "$VERSION" in + 15) + PSQL_PATH="${psql_15}/bin/psql" + PG_ISREADY_PATH="${psql_15}/bin/pg_isready" + ;; + 17) + PSQL_PATH="${psql_17}/bin/psql" + PG_ISREADY_PATH="${psql_17}/bin/pg_isready" + ;; + orioledb-17) + PSQL_PATH="${psql_orioledb-17}/bin/psql" + PG_ISREADY_PATH="${psql_orioledb-17}/bin/pg_isready" + ;; + esac + PG_REGRESS_PATH="${pg_regress}/bin/pg_regress" + + log_info "Using psql: $PSQL_PATH" + log_info "Using pg_isready: $PG_ISREADY_PATH" + log_info "Using pg_regress: $PG_REGRESS_PATH" + + log_info "Starting container $CONTAINER_NAME..." + docker run -d \ + --name "$CONTAINER_NAME" \ + -e POSTGRES_PASSWORD="$POSTGRES_PASSWORD" \ + -p "$PORT:5432" \ + "$IMAGE_TAG" + + if ! wait_for_postgres "localhost" "$PORT"; then + log_error "Container logs:" + docker logs "$CONTAINER_NAME" + exit 1 + fi + + log_info "Starting HTTP mock server on host..." + HTTP_MOCK_PORT=8880 + + python3 "$HTTP_MOCK_SERVER" $HTTP_MOCK_PORT & + HTTP_MOCK_PID=$! + + sleep 2 + if ! kill -0 "$HTTP_MOCK_PID" 2>/dev/null; then + log_error "HTTP mock server failed to start" + exit 1 + fi + log_info "HTTP mock server started on host port $HTTP_MOCK_PORT (PID: $HTTP_MOCK_PID)" + + HTTP_MOCK_HOST="host.docker.internal" + if [[ "$(uname)" == "Linux" ]]; then + HTTP_MOCK_HOST=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.Gateway}}{{end}}' "$CONTAINER_NAME") + fi + log_info "Container will access mock server at $HTTP_MOCK_HOST:$HTTP_MOCK_PORT" + + log_info "Running prime.sql to enable extensions..." + if ! PGPASSWORD="$POSTGRES_PASSWORD" "$PSQL_PATH" \ + -h localhost \ + -p "$PORT" \ + -U "$POSTGRES_USER" \ + -d "$POSTGRES_DB" \ + -v ON_ERROR_STOP=1 \ + -X \ + -f "$TESTS_DIR/prime.sql" 2>&1; then + log_error "Failed to run prime.sql" + exit 1 + fi + + log_info "Creating test_config table..." 
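+    # test_config lets the SQL tests look up the host-side HTTP mock at
+    # runtime, e.g. SELECT value FROM test_config WHERE key = 'http_mock_host';
+    # instead of hardcoding a gateway address, which differs between Linux
+    # (bridge gateway IP) and macOS (host.docker.internal).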
+ PGPASSWORD="$POSTGRES_PASSWORD" "$PSQL_PATH" \ + -h localhost \ + -p "$PORT" \ + -U "$POSTGRES_USER" \ + -d "$POSTGRES_DB" \ + -c "CREATE TABLE IF NOT EXISTS test_config (key TEXT PRIMARY KEY, value TEXT); + INSERT INTO test_config (key, value) VALUES ('http_mock_port', '$HTTP_MOCK_PORT') + ON CONFLICT (key) DO UPDATE SET value = EXCLUDED.value; + INSERT INTO test_config (key, value) VALUES ('http_mock_host', '$HTTP_MOCK_HOST') + ON CONFLICT (key) DO UPDATE SET value = EXCLUDED.value;" + + log_info "Collecting tests for version $VERSION..." + TEST_LIST=() + while IFS= read -r line; do + TEST_LIST+=("$line") + done < <(get_test_list "$VERSION") + log_info "Running ''${#TEST_LIST[@]} tests" + + mkdir -p "$OUTPUT_DIR/regression_output" + + log_info "Preparing test files..." + PATCHED_TESTS_DIR="$OUTPUT_DIR/tests" + cp -r "$TESTS_DIR" "$PATCHED_TESTS_DIR" + + for f in pgmq.out vault.out; do + if [[ -f "$PATCHED_TESTS_DIR/expected/$f" ]]; then + # shellcheck disable=SC2016 + sed -i.bak \ + -e 's/ "\$user"/ "\\$user"/g' \ + -e 's/search_path $/search_path /' \ + -e 's/^-----------------------------------$/------------------------------------/' \ + "$PATCHED_TESTS_DIR/expected/$f" + rm -f "$PATCHED_TESTS_DIR/expected/$f.bak" + fi + done + if [[ -f "$PATCHED_TESTS_DIR/expected/roles.out" ]]; then + # shellcheck disable=SC2016 + sed -i.bak \ + -e 's/\\"\$user\\"/\\"\\\\$user\\"/g' \ + "$PATCHED_TESTS_DIR/expected/roles.out" + rm -f "$PATCHED_TESTS_DIR/expected/roles.out.bak" + fi + + log_info "Running pg_regress..." + local regress_exit=0 + + if ! PGPASSWORD="$POSTGRES_PASSWORD" "$PG_REGRESS_PATH" \ + --use-existing \ + --dbname="$POSTGRES_DB" \ + --inputdir="$PATCHED_TESTS_DIR" \ + --outputdir="$OUTPUT_DIR/regression_output" \ + --host=localhost \ + --port="$PORT" \ + --user="$POSTGRES_USER" \ + "''${TEST_LIST[@]}" 2>&1; then + regress_exit=1 + fi + + if [[ $regress_exit -eq 0 ]]; then + log_info "''${GREEN}PASS: all ''${#TEST_LIST[@]} tests passed''${NC}" + else + log_error "FAIL: some tests failed" + if [[ -f "$OUTPUT_DIR/regression_output/regression.diffs" ]]; then + echo "" + echo "=== regression.diffs ===" + cat "$OUTPUT_DIR/regression_output/regression.diffs" + echo "========================" + fi + exit 1 + fi + + if [[ "$KEEP_CONTAINER" == "true" ]]; then + log_info "Container kept running: $CONTAINER_NAME (port $PORT)" + log_info "Connect with: psql -h localhost -p $PORT -U $POSTGRES_USER $POSTGRES_DB" + fi + } + + main "$@" + ''; +} diff --git a/nix/packages/image-size-analyzer.nix b/nix/packages/image-size-analyzer.nix new file mode 100644 index 000000000..c12b1e040 --- /dev/null +++ b/nix/packages/image-size-analyzer.nix @@ -0,0 +1,410 @@ +{ + runCommand, + makeWrapper, + dive, + jq, + docker, + coreutils, + gawk, + gnused, + bc, +}: +runCommand "image-size-analyzer" + { + nativeBuildInputs = [ makeWrapper ]; + buildInputs = [ + dive + jq + docker + coreutils + gawk + gnused + bc + ]; + } + '' + mkdir -p $out/bin + cat > $out/bin/image-size-analyzer << 'SCRIPT' + #!/usr/bin/env bash + set -euo pipefail + + # Default values + OUTPUT_JSON=false + NO_BUILD=false + declare -a IMAGES=() + ALL_DOCKERFILES=("Dockerfile-15" "Dockerfile-17" "Dockerfile-orioledb-17") + TIMESTAMP=$(date +%s) + TEMP_DIR="/tmp/image-size-analyzer-$TIMESTAMP" + + show_help() { + cat << EOF + Usage: image-size-analyzer [OPTIONS] + + Analyze Docker image sizes for Supabase Postgres images. 
+ + Options: + --json Output results as JSON instead of TUI + --image DOCKERFILE Analyze specific Dockerfile (can be used multiple times) + Valid values: Dockerfile-15, Dockerfile-17, Dockerfile-orioledb-17 + --no-build Skip building images, analyze existing ones + --help Show this help message + + Examples: + image-size-analyzer # Analyze all images + image-size-analyzer --json # Output as JSON + image-size-analyzer --image Dockerfile-17 # Analyze only Dockerfile-17 + image-size-analyzer --image Dockerfile-15 --image Dockerfile-17 + image-size-analyzer --no-build # Skip build step + EOF + } + + cleanup() { + rm -rf "$TEMP_DIR" 2>/dev/null || true + } + trap cleanup EXIT + + # Parse arguments + while [[ $# -gt 0 ]]; do + case $1 in + --json) + OUTPUT_JSON=true + shift + ;; + --no-build) + NO_BUILD=true + shift + ;; + --image) + if [[ -z "$2" ]]; then + echo "Error: --image requires a value" + exit 1 + fi + IMAGES+=("$2") + shift 2 + ;; + --help) + show_help + exit 0 + ;; + *) + echo "Error: Unknown option: $1" + show_help + exit 1 + ;; + esac + done + + # If no images specified, use all + num_images=''${#IMAGES[@]} + if [[ $num_images -eq 0 ]]; then + IMAGES=("''${ALL_DOCKERFILES[@]}") + fi + + # Validate image names + for img in "''${IMAGES[@]}"; do + valid=false + for valid_img in "''${ALL_DOCKERFILES[@]}"; do + if [[ "$img" == "$valid_img" ]]; then + valid=true + break + fi + done + if [[ "$valid" == "false" ]]; then + echo "Error: Invalid Dockerfile: $img" + echo "Valid options: ''${ALL_DOCKERFILES[*]}" + exit 1 + fi + done + + # Check Docker is running + if ! docker info &>/dev/null; then + echo "Error: Docker daemon is not running" + exit 3 + fi + + mkdir -p "$TEMP_DIR" + + # Helper to format bytes + format_bytes() { + local bytes=$1 + if [[ $bytes -ge 1073741824 ]]; then + printf "%.2f GB" "$(echo "scale=2; $bytes / 1073741824" | bc)" + elif [[ $bytes -ge 1048576 ]]; then + printf "%.2f MB" "$(echo "scale=2; $bytes / 1048576" | bc)" + elif [[ $bytes -ge 1024 ]]; then + printf "%.2f KB" "$(echo "scale=2; $bytes / 1024" | bc)" + else + printf "%d B" "$bytes" + fi + } + + # Get tag name from Dockerfile name + get_tag() { + local dockerfile=$1 + local suffix=''${dockerfile#Dockerfile-} + echo "supabase-postgres:$suffix-analyze" + } + + # Build a single image + build_image() { + local dockerfile=$1 + local tag + tag=$(get_tag "$dockerfile") + + echo "Building $dockerfile as $tag..." + if ! docker build -f "$dockerfile" -t "$tag" . ; then + echo "Error: Failed to build $dockerfile" + return 1 + fi + } + + # Get total image size + get_total_size() { + local tag=$1 + docker inspect --format='{{.Size}}' "$tag" 2>/dev/null || echo "0" + } + + # Get layer info using dive + get_layers() { + local tag=$1 + local safe_tag=''${tag//[:\/]/-} + local output_file="$TEMP_DIR/dive-$safe_tag.json" + + if ! dive "$tag" --json "$output_file" >/dev/null; then + echo "Warning: dive failed for $tag" >&2 + echo "[]" + return + fi + + # Extract layer info from dive output (note: dive uses sizeBytes not size) + jq -c '[.layer[] | {index: .index, size_bytes: .sizeBytes, command: .command}] | sort_by(-.size_bytes) | .[0:10]' "$output_file" 2>/dev/null || echo "[]" + } + + # Get directory sizes from dive output + get_directories() { + local tag=$1 + local safe_tag=''${tag//[:\/]/-} + local output_file="$TEMP_DIR/dive-$safe_tag.json" + + if [[ ! 
-f "$output_file" ]]; then + echo "[]" + return + fi + + # Aggregate file sizes by top-level directory from all layers + jq -c ' + [.layer[].fileList[] | select(.isDir == false and .size > 0)] + | group_by(.path | split("/")[0]) + | map({path: ("/" + (.[0].path | split("/")[0])), size_bytes: (map(.size) | add)}) + | sort_by(-.size_bytes) + | .[0:10] + ' "$output_file" 2>/dev/null || echo "[]" + } + + # Get Nix package sizes + get_nix_packages() { + local tag=$1 + + docker run --rm "$tag" sh -c 'du -sb /nix/store/*/ 2>/dev/null | sort -rn | head -15' 2>/dev/null | \ + awk '{ + size=$1 + path=$2 + # Extract package name from path like /nix/store/abc123-packagename-1.0/ + n=split(path, parts, "/") + store_path=parts[n-1] # Get the nix store hash-name part + # Remove the hash prefix (32 chars + dash) + if (length(store_path) > 33) { + name=substr(store_path, 34) + } else { + name=store_path + } + # Remove trailing slash from name + gsub(/\/$/, "", name) + printf "{\"name\":\"%s\",\"size_bytes\":%d}\n", name, size + }' | jq -s '.' 2>/dev/null || echo "[]" + } + + # Get system package sizes (handles both Debian/Ubuntu and Alpine) + get_system_packages() { + local tag=$1 + local result + + # Try dpkg first (Debian/Ubuntu), then apk (Alpine) + result=$(docker run --rm "$tag" sh -c ' + if command -v dpkg-query >/dev/null 2>&1; then + dpkg-query -W -f="''${Package}\t''${Installed-Size}\n" 2>/dev/null | sort -t" " -k2 -rn | head -15 | awk -F"\t" "{printf \"{\\\"name\\\":\\\"%s\\\",\\\"size_bytes\\\":%d}\\n\", \$1, \$2 * 1024}" + elif command -v apk >/dev/null 2>&1; then + # Get all installed packages and their sizes + # apk info -s outputs "pkg installed size:\nNNNN KiB" with warnings to stdout + for pkg in $(apk info 2>&1 | grep -v "^WARNING"); do + size_line=$(apk info -s "$pkg" 2>&1 | grep -E "^[0-9]+ [KMG]iB$") + # Extract number and unit (e.g., "3214 KiB" -> 3214 * 1024) + size_num=$(echo "$size_line" | awk "{print \$1}") + size_unit=$(echo "$size_line" | awk "{print \$2}") + if [ -n "$size_num" ] && [ "$size_num" -gt 0 ] 2>/dev/null; then + case "$size_unit" in + KiB) size_bytes=$((size_num * 1024)) ;; + MiB) size_bytes=$((size_num * 1024 * 1024)) ;; + GiB) size_bytes=$((size_num * 1024 * 1024 * 1024)) ;; + *) size_bytes=$size_num ;; + esac + printf "{\"name\":\"%s\",\"size_bytes\":%s}\n" "$pkg" "$size_bytes" + fi + done + else + echo "" + fi + ' 2>/dev/null) + + if [[ -n "$result" ]]; then + echo "$result" | jq -s 'sort_by(-.size_bytes) | .[0:15]' 2>/dev/null || echo "[]" + else + echo "[]" + fi + } + + # Analyze a single image + analyze_image() { + local dockerfile=$1 + local tag + tag=$(get_tag "$dockerfile") + + local total_size + total_size=$(get_total_size "$tag") + [[ -z "$total_size" || "$total_size" == "" ]] && total_size="0" + + local layers + layers=$(get_layers "$tag") + [[ -z "$layers" || "$layers" == "" ]] && layers="[]" + + local directories + directories=$(get_directories "$tag") + [[ -z "$directories" || "$directories" == "" ]] && directories="[]" + + local nix_packages + nix_packages=$(get_nix_packages "$tag") + [[ -z "$nix_packages" || "$nix_packages" == "" ]] && nix_packages="[]" + + local system_packages + system_packages=$(get_system_packages "$tag") + [[ -z "$system_packages" || "$system_packages" == "" ]] && system_packages="[]" + + # Build JSON result for this image + jq -n \ + --arg dockerfile "$dockerfile" \ + --argjson total_size "$total_size" \ + --argjson layers "$layers" \ + --argjson directories "$directories" \ + --argjson nix_packages "$nix_packages" \ + 
--argjson system_packages "$system_packages" \ + '{ + dockerfile: $dockerfile, + total_size_bytes: $total_size, + layers: $layers, + directories: $directories, + nix_packages: $nix_packages, + system_packages: $system_packages + }' + } + + # Print TUI output for a single image + print_tui() { + local json=$1 + + local dockerfile + dockerfile=$(echo "$json" | jq -r '.dockerfile') + + local total_size + total_size=$(echo "$json" | jq -r '.total_size_bytes') + + echo "" + echo "================================================================================" + echo "IMAGE: $dockerfile" + echo "================================================================================" + echo "Total Size: $(format_bytes "$total_size")" + echo "" + + echo "LAYERS (top 10 by size)" + echo "--------------------------------------------------------------------------------" + printf " %-4s %-12s %s\n" "#" "SIZE" "COMMAND" + echo "$json" | jq -r '.layers[] | "\(.index)\t\(.size_bytes)\t\(.command)"' 2>/dev/null | \ + while IFS=$'\t' read -r idx size cmd; do + cmd_short=$(echo "$cmd" | cut -c1-60) + printf " %-4s %-12s %s\n" "$idx" "$(format_bytes "$size")" "$cmd_short" + done + echo "" + + echo "DIRECTORIES (top 10 by size)" + echo "--------------------------------------------------------------------------------" + echo "$json" | jq -r '.directories[] | "\(.path)\t\(.size_bytes)"' 2>/dev/null | \ + while IFS=$'\t' read -r path size; do + printf " %-45s %s\n" "$path" "$(format_bytes "$size")" + done + echo "" + + echo "NIX PACKAGES (top 15 by size)" + echo "--------------------------------------------------------------------------------" + echo "$json" | jq -r '.nix_packages[] | "\(.name)\t\(.size_bytes)"' 2>/dev/null | \ + while IFS=$'\t' read -r name size; do + printf " %-45s %s\n" "$name" "$(format_bytes "$size")" + done + echo "" + + echo "SYSTEM PACKAGES (top 15 by size)" + echo "--------------------------------------------------------------------------------" + echo "$json" | jq -r '.system_packages[] | "\(.name)\t\(.size_bytes)"' 2>/dev/null | \ + while IFS=$'\t' read -r name size; do + printf " %-45s %s\n" "$name" "$(format_bytes "$size")" + done + } + + # Main execution + main() { + # Build images if needed + if [[ "$NO_BUILD" == "false" ]]; then + for dockerfile in "''${IMAGES[@]}"; do + build_image "$dockerfile" || exit 1 + done + fi + + # Analyze each image + declare -a results=() + for dockerfile in "''${IMAGES[@]}"; do + local tag + tag=$(get_tag "$dockerfile") + + # Check image exists + if ! docker image inspect "$tag" &>/dev/null; then + echo "Error: Image $tag not found. Run without --no-build to build it first." + exit 1 + fi + + echo "Analyzing $dockerfile..." 
>&2 + local result + result=$(analyze_image "$dockerfile") + results+=("$result") + done + + # Output results + if [[ "$OUTPUT_JSON" == "true" ]]; then + # Combine all results into JSON array + printf '%s\n' "''${results[@]}" | jq -s '{images: .}' + else + for result in "''${results[@]}"; do + print_tui "$result" + done + fi + } + + main + SCRIPT + chmod +x $out/bin/image-size-analyzer + wrapProgram $out/bin/image-size-analyzer \ + --prefix PATH : ${dive}/bin \ + --prefix PATH : ${jq}/bin \ + --prefix PATH : ${docker}/bin \ + --prefix PATH : ${coreutils}/bin \ + --prefix PATH : ${gawk}/bin \ + --prefix PATH : ${gnused}/bin \ + --prefix PATH : ${bc}/bin + '' diff --git a/nix/packages/pg-startup-profiler.nix b/nix/packages/pg-startup-profiler.nix new file mode 100644 index 000000000..3e38fd8c8 --- /dev/null +++ b/nix/packages/pg-startup-profiler.nix @@ -0,0 +1,37 @@ +{ pkgs, lib, ... }: +let + pg-startup-profiler = pkgs.buildGoModule { + pname = "pg-startup-profiler"; + version = "0.1.0"; + + src = ./pg-startup-profiler; + + vendorHash = "sha256-HAyyFdu/lgNISlv+vf+fpP3nMZ+aIE7dVRpzBnsaPC8="; + + subPackages = [ "cmd/pg-startup-profiler" ]; + + # Disable CGO for simpler builds (eBPF stub for non-Linux) + env.CGO_ENABLED = "0"; + + ldflags = [ + "-s" + "-w" + "-X main.version=0.1.0" + ]; + + doCheck = true; + checkPhase = '' + go test -v ./... + ''; + + meta = with lib; { + description = "PostgreSQL container startup profiler"; + mainProgram = "pg-startup-profiler"; + license = licenses.asl20; + platforms = platforms.linux ++ platforms.darwin; + }; + }; +in +{ + inherit pg-startup-profiler; +} diff --git a/nix/packages/pg-startup-profiler/cmd/pg-startup-profiler/main.go b/nix/packages/pg-startup-profiler/cmd/pg-startup-profiler/main.go new file mode 100644 index 000000000..a7ad64e8f --- /dev/null +++ b/nix/packages/pg-startup-profiler/cmd/pg-startup-profiler/main.go @@ -0,0 +1,45 @@ +package main + +import ( + "os" + + "github.com/spf13/cobra" +) + +var ( + version = "dev" +) + +var rootCmd = &cobra.Command{ + Use: "pg-startup-profiler", + Short: "PostgreSQL container startup profiler", + Long: `pg-startup-profiler - Profile PostgreSQL container startup time + +A tool for measuring and analyzing PostgreSQL container startup performance +using eBPF tracing and log parsing. 
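+The profiler creates the container, streams its logs, and matches lines
+against configurable rules (see --rules) to build a startup timeline.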
+ +Commands: + profile Profile a container's startup time + compare Compare startup times between two images + +Examples: + # Profile a Dockerfile + pg-startup-profiler profile --dockerfile Dockerfile-17 + + # Profile existing image + pg-startup-profiler profile --image pg-docker-test:17 + + # JSON output + pg-startup-profiler profile --image pg-docker-test:17 --json + + # Compare two images + pg-startup-profiler compare --baseline img:v1 --candidate img:v2 +`, + Version: version, +} + +func main() { + if err := rootCmd.Execute(); err != nil { + os.Exit(1) + } +} diff --git a/nix/packages/pg-startup-profiler/cmd/pg-startup-profiler/profile.go b/nix/packages/pg-startup-profiler/cmd/pg-startup-profiler/profile.go new file mode 100644 index 000000000..c69a9117e --- /dev/null +++ b/nix/packages/pg-startup-profiler/cmd/pg-startup-profiler/profile.go @@ -0,0 +1,182 @@ +package main + +import ( + "context" + "fmt" + "os" + "os/signal" + "syscall" + "time" + + "github.com/spf13/cobra" + "github.com/supabase/pg-startup-profiler/internal/docker" + "github.com/supabase/pg-startup-profiler/internal/logger" + "github.com/supabase/pg-startup-profiler/internal/logs" + "github.com/supabase/pg-startup-profiler/internal/report" + "github.com/supabase/pg-startup-profiler/internal/rules" +) + +var ( + flagImage string + flagDockerfile string + flagJSON bool + flagVerbose bool + flagRulesFile string + flagTimeout time.Duration +) + +func init() { + profileCmd.Flags().StringVar(&flagImage, "image", "", "Docker image to profile") + profileCmd.Flags().StringVar(&flagDockerfile, "dockerfile", "", "Dockerfile to build and profile") + profileCmd.Flags().BoolVar(&flagJSON, "json", false, "Output as JSON") + profileCmd.Flags().BoolVar(&flagVerbose, "verbose", false, "Include full event timeline") + profileCmd.Flags().StringVar(&flagRulesFile, "rules", "", "Custom rules YAML file") + profileCmd.Flags().DurationVar(&flagTimeout, "timeout", 5*time.Minute, "Timeout for container startup") + + rootCmd.AddCommand(profileCmd) +} + +var profileCmd = &cobra.Command{ + Use: "profile", + Short: "Profile container startup time", + Long: "Profile a PostgreSQL container's startup time and show breakdown", + RunE: runProfile, +} + +func runProfile(cmd *cobra.Command, args []string) error { + log := logger.Setup(flagVerbose, false) + + if flagImage == "" && flagDockerfile == "" { + return fmt.Errorf("either --image or --dockerfile is required") + } + + // Load rules + var rulesData []byte + if flagRulesFile != "" { + data, err := os.ReadFile(flagRulesFile) + if err != nil { + return fmt.Errorf("failed to read rules file: %w", err) + } + rulesData = data + } else { + rulesData = rules.DefaultRulesYAML + } + + r, err := rules.LoadFromYAML(rulesData) + if err != nil { + return fmt.Errorf("failed to load rules: %w", err) + } + + // Create Docker client + dockerClient, err := docker.NewClient() + if err != nil { + return fmt.Errorf("failed to create docker client: %w", err) + } + defer dockerClient.Close() + + ctx, cancel := context.WithTimeout(context.Background(), flagTimeout) + defer cancel() + + // Handle signals + sigCh := make(chan os.Signal, 1) + signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM) + go func() { + <-sigCh + cancel() + }() + + imageName := flagImage + if flagDockerfile != "" { + // Build image + log.Info("Building image from Dockerfile", "dockerfile", flagDockerfile) + imageName = fmt.Sprintf("pg-startup-profiler-test:%d", time.Now().Unix()) + // For now, shell out to docker build + return fmt.Errorf("--dockerfile 
not yet implemented, please build image first and use --image") + } + + // Check image exists + exists, err := dockerClient.ImageExists(ctx, imageName) + if err != nil { + return fmt.Errorf("failed to check image: %w", err) + } + if !exists { + return fmt.Errorf("image not found: %s", imageName) + } + + log.Info("Profiling container startup", "image", imageName) + + // Create timeline + timeline := report.NewTimeline() + parser := logs.NewParser(r) + + // Create container + env := []string{"POSTGRES_PASSWORD=postgres"} + container, err := dockerClient.CreateContainer(ctx, imageName, env) + if err != nil { + return fmt.Errorf("failed to create container: %w", err) + } + defer func() { + dockerClient.StopContainer(context.Background(), container.ID) + dockerClient.RemoveContainer(context.Background(), container.ID) + }() + + // Start log streaming + logEvents := make(chan logs.Event, 100) + logDone := make(chan error, 1) + go func() { + err := dockerClient.StreamLogs(ctx, container.ID, func(line string, ts time.Time) { + parser.ParseLine(line, ts, logEvents) + }) + logDone <- err + }() + + // Start container and record time + startTime, err := dockerClient.StartContainer(ctx, container.ID) + if err != nil { + return fmt.Errorf("failed to start container: %w", err) + } + + timeline.AddEvent(report.Event{ + Type: report.EventTypeDocker, + Name: "container_start", + Timestamp: startTime, + }) + + // Wait for ready or timeout + ready := false + for !ready { + select { + case event := <-logEvents: + timeline.AddEvent(report.Event{ + Type: report.EventTypeLog, + Name: event.Name, + Timestamp: event.Timestamp, + Captures: event.Captures, + MarksReady: event.MarksReady, + }) + if event.MarksReady { + ready = true + } + case <-ctx.Done(): + return fmt.Errorf("timeout waiting for container to be ready") + case err := <-logDone: + if err != nil && !ready { + return fmt.Errorf("log streaming error: %w", err) + } + } + } + + // Finalize timeline + timeline.Finalize() + + // Output results + if flagJSON { + return report.PrintJSON(os.Stdout, imageName, timeline, flagVerbose) + } + if flagVerbose { + report.PrintTableVerbose(os.Stdout, imageName, timeline) + } else { + report.PrintTable(os.Stdout, imageName, timeline) + } + return nil +} diff --git a/nix/packages/pg-startup-profiler/go.mod b/nix/packages/pg-startup-profiler/go.mod new file mode 100644 index 000000000..65f8c31cc --- /dev/null +++ b/nix/packages/pg-startup-profiler/go.mod @@ -0,0 +1,56 @@ +module github.com/supabase/pg-startup-profiler + +go 1.23.0 + +require ( + github.com/charmbracelet/log v0.4.2 + github.com/docker/docker v25.0.6+incompatible + github.com/spf13/cobra v1.8.1 + gopkg.in/yaml.v3 v3.0.1 +) + +require ( + github.com/Microsoft/go-winio v0.4.21 // indirect + github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect + github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc // indirect + github.com/charmbracelet/lipgloss v1.1.0 // indirect + github.com/charmbracelet/x/ansi v0.8.0 // indirect + github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd // indirect + github.com/charmbracelet/x/term v0.2.1 // indirect + github.com/containerd/log v0.1.0 // indirect + github.com/distribution/reference v0.6.0 // indirect + github.com/docker/go-connections v0.6.0 // indirect + github.com/docker/go-units v0.5.0 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/go-logfmt/logfmt v0.6.0 // indirect + github.com/go-logr/logr v1.4.1 // indirect + github.com/go-logr/stdr v1.2.2 // indirect 
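+	// Transitive dependencies of cobra, the docker client, and the charmbracelet log stack.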
+ github.com/gogo/protobuf v1.3.2 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/kr/pretty v0.3.1 // indirect + github.com/lucasb-eyer/go-colorful v1.2.0 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mattn/go-runewidth v0.0.16 // indirect + github.com/moby/term v0.5.2 // indirect + github.com/morikuni/aec v1.1.0 // indirect + github.com/muesli/termenv v0.16.0 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.1 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/rivo/uniseg v0.4.7 // indirect + github.com/rogpeppe/go-internal v1.10.0 // indirect + github.com/sirupsen/logrus v1.9.4 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect + go.opentelemetry.io/otel v1.24.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0 // indirect + go.opentelemetry.io/otel/metric v1.24.0 // indirect + go.opentelemetry.io/otel/sdk v1.24.0 // indirect + go.opentelemetry.io/otel/trace v1.24.0 // indirect + golang.org/x/exp v0.0.0-20231006140011-7918f672742d // indirect + golang.org/x/sys v0.30.0 // indirect + golang.org/x/time v0.5.0 // indirect + gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect + gotest.tools/v3 v3.5.2 // indirect +) diff --git a/nix/packages/pg-startup-profiler/go.sum b/nix/packages/pg-startup-profiler/go.sum new file mode 100644 index 000000000..454d405ac --- /dev/null +++ b/nix/packages/pg-startup-profiler/go.sum @@ -0,0 +1,173 @@ +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Microsoft/go-winio v0.4.21 h1:+6mVbXh4wPzUrl1COX9A+ZCvEpYsOBZ6/+kwDnvLyro= +github.com/Microsoft/go-winio v0.4.21/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k= +github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8= +github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= +github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc h1:4pZI35227imm7yK2bGPcfpFEmuY1gc2YSTShr4iJBfs= +github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc/go.mod h1:X4/0JoqgTIPSFcRA/P6INZzIuyqdFY5rm8tb41s9okk= +github.com/charmbracelet/lipgloss v1.1.0 h1:vYXsiLHVkK7fp74RkV7b2kq9+zDLoEU4MZoFqR/noCY= +github.com/charmbracelet/lipgloss v1.1.0/go.mod h1:/6Q8FR2o+kj8rz4Dq0zQc3vYf7X+B0binUUBwA0aL30= +github.com/charmbracelet/log v0.4.2 h1:hYt8Qj6a8yLnvR+h7MwsJv/XvmBJXiueUcI3cIxsyig= +github.com/charmbracelet/log v0.4.2/go.mod h1:qifHGX/tc7eluv2R6pWIpyHDDrrb/AG71Pf2ysQu5nw= +github.com/charmbracelet/x/ansi v0.8.0 h1:9GTq3xq9caJW8ZrBTe0LIe2fvfLR/bYXKTx2llXn7xE= +github.com/charmbracelet/x/ansi v0.8.0/go.mod h1:wdYl/ONOLHLIVmQaxbIYEC/cRKOQyjTkowiI4blgS9Q= +github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd h1:vy0GVL4jeHEwG5YOXDmi86oYw2yuYUGqz6a8sLwg0X8= +github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd/go.mod h1:xe0nKWGd3eJgtqZRaN9RjMtK7xUYchjzPr7q6kcvCCs= 
+github.com/charmbracelet/x/term v0.2.1 h1:AQeHeLZ1OqSXhrAWpYUtZyX1T3zVxfpZuEQMIQaGIAQ= +github.com/charmbracelet/x/term v0.2.1/go.mod h1:oQ4enTYFV7QN4m0i9mzHrViD7TQKvNEEkHUMCmsxdUg= +github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= +github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/docker/docker v25.0.6+incompatible h1:5cPwbwriIcsua2REJe8HqQV+6WlWc1byg2QSXzBxBGg= +github.com/docker/docker v25.0.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94= +github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= +github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 h1:Wqo399gCIufwto+VfwCSvsnfGpF/w5E9CNxSwbpD6No= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0/go.mod h1:qmOFXW2epJhM0qSnUUYpldc7gVz2KMQwJ/QYCDIa7XU= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod 
h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY= +github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= +github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= +github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc= +github.com/morikuni/aec v1.1.0 h1:vBBl0pUnvi/Je71dsRrhMBtreIqNMYErSAbEeb8jrXQ= +github.com/morikuni/aec v1.1.0/go.mod h1:xDRgiq/iw5l+zkao76YTKzKttOp2cwPEne25HDkJnBw= +github.com/muesli/termenv v0.16.0 h1:S5AlUN9dENB57rsbnkPyfdGuWIlkmzJjbFf0Tf5FWUc= +github.com/muesli/termenv v0.16.0/go.mod h1:ZRfOIKPFDYQoDFF4Olj7/QJbW60Ol/kL1pU3VfY/Cnk= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= +github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= +github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.9.4 h1:TsZE7l11zFCLZnZ+teH4Umoq5BhEIfIzfRDZ1Uzql2w= +github.com/sirupsen/logrus v1.9.4/go.mod h1:ftWc9WdOfJ0a92nsE2jF5u5ZwH8Bv2zdeOC42RjbV2g= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/testify v1.2.2/go.mod 
h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no= +github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= +go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= +go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.24.0 h1:t6wl9SPayj+c7lEIFgm4ooDBZVb01IhLB4InpomhRw8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.24.0/go.mod h1:iSDOcsnSA5INXzZtwaBPrKp/lWu/V14Dd+llD0oI2EA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0 h1:Xw8U6u2f8DK2XAkGRFV7BBLENgnTGX9i4rQRxJf+/vs= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0/go.mod h1:6KW1Fm6R/s6Z3PGXwSJN2K4eT6wQB3vXX6CVnYX9NmM= +go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= +go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= +go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw= +go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg= +go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= +go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= +go.opentelemetry.io/proto/otlp v1.1.0 h1:2Di21piLrCqJ3U3eXGCTPHE9R8Nh+0uglSnOyxikMeI= +go.opentelemetry.io/proto/otlp v1.1.0/go.mod h1:GpBHCBWiqvVLDqmHZsoMM3C5ySeKTC7ej/RNTae6MdY= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI= +golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= +golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= +golang.org/x/sync 
v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= +golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/genproto/googleapis/api v0.0.0-20240102182953-50ed04b92917 h1:rcS6EyEaoCO52hQDupoSfrxI3R6C2Tq741is7X8OvnM= +google.golang.org/genproto/googleapis/api v0.0.0-20240102182953-50ed04b92917/go.mod h1:CmlNWB9lSezaYELKS5Ym1r44VrrbPUa7JTvw+6MbpJ0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917 h1:6G8oQ016D88m1xAKljMlBOOGWDZkes4kMhgGFlf8WcQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917/go.mod h1:xtjpI3tXFPP051KaWnhvxkiubL/6dJ18vLVf7q2pTOU= +google.golang.org/grpc v1.61.1 h1:kLAiWrZs7YeDM6MumDe7m3y4aM6wacLzM1Y/wiLP9XY= +google.golang.org/grpc v1.61.1/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs= +google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= +google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 
v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= +gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= diff --git a/nix/packages/pg-startup-profiler/internal/docker/client.go b/nix/packages/pg-startup-profiler/internal/docker/client.go new file mode 100644 index 000000000..a729f6e3c --- /dev/null +++ b/nix/packages/pg-startup-profiler/internal/docker/client.go @@ -0,0 +1,138 @@ +// nix/packages/pg-startup-profiler/internal/docker/client.go +package docker + +import ( + "bufio" + "context" + "fmt" + "io" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/client" + "github.com/docker/docker/pkg/stdcopy" +) + +type Client struct { + cli *client.Client +} + +type ContainerInfo struct { + ID string + CgroupID uint64 + StartTime time.Time +} + +func NewClient() (*Client, error) { + cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) + if err != nil { + return nil, fmt.Errorf("failed to create docker client: %w", err) + } + return &Client{cli: cli}, nil +} + +func (c *Client) Close() error { + return c.cli.Close() +} + +func (c *Client) ImageExists(ctx context.Context, imageName string) (bool, error) { + _, _, err := c.cli.ImageInspectWithRaw(ctx, imageName) + if err != nil { + if client.IsErrNotFound(err) { + return false, nil + } + return false, err + } + return true, nil +} + +func (c *Client) BuildImage(ctx context.Context, dockerfile, contextPath, tag string) error { + // Implementation for building from Dockerfile + // Uses docker build API + return fmt.Errorf("not implemented - use docker build externally") +} + +func (c *Client) CreateContainer(ctx context.Context, imageName string, env []string) (*ContainerInfo, error) { + resp, err := c.cli.ContainerCreate(ctx, &container.Config{ + Image: imageName, + Env: env, + }, &container.HostConfig{}, nil, nil, "") + if err != nil { + return nil, fmt.Errorf("failed to create container: %w", err) + } + + return &ContainerInfo{ + ID: resp.ID, + }, nil +} + +func (c *Client) StartContainer(ctx context.Context, containerID string) (time.Time, error) { + startTime := time.Now() + if err := c.cli.ContainerStart(ctx, containerID, container.StartOptions{}); err != nil { + return time.Time{}, fmt.Errorf("failed to start container: %w", err) + } + return startTime, nil +} + +func (c *Client) StopContainer(ctx context.Context, containerID string) error { + timeout := 10 + return c.cli.ContainerStop(ctx, containerID, container.StopOptions{Timeout: &timeout}) +} + +func (c *Client) RemoveContainer(ctx context.Context, containerID string) error { + return c.cli.ContainerRemove(ctx, containerID, container.RemoveOptions{Force: true}) +} + +func (c *Client) GetContainerCgroupID(ctx context.Context, containerID string) (uint64, error) { + inspect, err := c.cli.ContainerInspect(ctx, containerID) + if err != nil { + return 0, err + } + // The cgroup path contains the container ID + // We need to get the cgroup inode for eBPF filtering + // This is platform-specific and may need adjustment + _ = inspect + return 0, fmt.Errorf("cgroup ID extraction not implemented") +} + +func (c *Client) StreamLogs(ctx context.Context, containerID string, callback func(line string, 
timestamp time.Time)) error {
+	options := container.LogsOptions{
+		ShowStdout: true,
+		ShowStderr: true,
+		Follow:     true,
+		Timestamps: true,
+	}
+
+	reader, err := c.cli.ContainerLogs(ctx, containerID, options)
+	if err != nil {
+		return err
+	}
+	defer reader.Close()
+
+	// Docker multiplexes stdout/stderr, need to demux; propagate copy errors
+	pr, pw := io.Pipe()
+	go func() {
+		_, copyErr := stdcopy.StdCopy(pw, pw, reader)
+		pw.CloseWithError(copyErr) // CloseWithError(nil) behaves like Close
+	}()
+
+	scanner := bufio.NewScanner(pr)
+	for scanner.Scan() {
+		line := scanner.Text()
+		// Pass time.Now() as a fallback; the rules' timestamp regex refines it
+		callback(line, time.Now())
+	}
+
+	return scanner.Err()
+}
+
+func (c *Client) PullImage(ctx context.Context, imageName string) error {
+	reader, err := c.cli.ImagePull(ctx, imageName, types.ImagePullOptions{})
+	if err != nil {
+		return err
+	}
+	defer reader.Close()
+	io.Copy(io.Discard, reader)
+	return nil
+}
diff --git a/nix/packages/pg-startup-profiler/internal/docker/client_test.go b/nix/packages/pg-startup-profiler/internal/docker/client_test.go
new file mode 100644
index 000000000..c74fee6ae
--- /dev/null
+++ b/nix/packages/pg-startup-profiler/internal/docker/client_test.go
@@ -0,0 +1,18 @@
+// nix/packages/pg-startup-profiler/internal/docker/client_test.go
+package docker
+
+import (
+	"testing"
+)
+
+func TestNewClient(t *testing.T) {
+	client, err := NewClient()
+	if err != nil {
+		t.Skipf("Docker not available: %v", err)
+	}
+	defer client.Close()
+
+	if client.cli == nil {
+		t.Error("expected client to be initialized")
+	}
+}
diff --git a/nix/packages/pg-startup-profiler/internal/ebpf/tracer.go b/nix/packages/pg-startup-profiler/internal/ebpf/tracer.go
new file mode 100644
index 000000000..5047f68df
--- /dev/null
+++ b/nix/packages/pg-startup-profiler/internal/ebpf/tracer.go
@@ -0,0 +1,59 @@
+//go:build linux
+
+package ebpf
+
+import (
+	"context"
+	"time"
+)
+
+type ExecEvent struct {
+	Timestamp time.Time
+	Comm      string
+	Args      string
+	PID       uint32
+}
+
+type OpenEvent struct {
+	Timestamp time.Time
+	Path      string
+	PID       uint32
+}
+
+type Tracer struct {
+	cgroupID uint64
+	execChan chan ExecEvent
+	openChan chan OpenEvent
+}
+
+func NewTracer(cgroupID uint64) (*Tracer, error) {
+	return &Tracer{
+		cgroupID: cgroupID,
+		execChan: make(chan ExecEvent, 1000),
+		openChan: make(chan OpenEvent, 1000),
+	}, nil
+}
+
+func (t *Tracer) Start(ctx context.Context) error {
+	// TODO: Implement actual eBPF probe attachment
+	// This requires:
+	// 1. Load eBPF program from embedded bytecode
+	// 2. Attach to tracepoints
+	// 3. Set up perf buffer for events
+	// 4. 
Filter by cgroup ID + return nil +} + +func (t *Tracer) Stop() error { + close(t.execChan) + close(t.openChan) + return nil +} + +func (t *Tracer) ExecEvents() <-chan ExecEvent { + return t.execChan +} + +func (t *Tracer) OpenEvents() <-chan OpenEvent { + return t.openChan +} diff --git a/nix/packages/pg-startup-profiler/internal/ebpf/tracer_stub.go b/nix/packages/pg-startup-profiler/internal/ebpf/tracer_stub.go new file mode 100644 index 000000000..1e6e22f63 --- /dev/null +++ b/nix/packages/pg-startup-profiler/internal/ebpf/tracer_stub.go @@ -0,0 +1,47 @@ +//go:build !linux + +package ebpf + +import ( + "context" + "fmt" + "time" +) + +type ExecEvent struct { + Timestamp time.Time + Comm string + Args string + PID uint32 +} + +type OpenEvent struct { + Timestamp time.Time + Path string + PID uint32 +} + +type Tracer struct { + execChan chan ExecEvent + openChan chan OpenEvent +} + +func NewTracer(cgroupID uint64) (*Tracer, error) { + return nil, fmt.Errorf("eBPF tracing is only supported on Linux") +} + +func (t *Tracer) Start(ctx context.Context) error { + return fmt.Errorf("eBPF tracing is only supported on Linux") +} + +func (t *Tracer) Stop() error { + return nil +} + +func (t *Tracer) ExecEvents() <-chan ExecEvent { + return nil +} + +func (t *Tracer) OpenEvents() <-chan OpenEvent { + return nil +} diff --git a/nix/packages/pg-startup-profiler/internal/logger/logger.go b/nix/packages/pg-startup-profiler/internal/logger/logger.go new file mode 100644 index 000000000..502b26fe8 --- /dev/null +++ b/nix/packages/pg-startup-profiler/internal/logger/logger.go @@ -0,0 +1,26 @@ +package logger + +import ( + "io" + "os" + + "github.com/charmbracelet/log" +) + +func Setup(verbose, debug bool) *log.Logger { + var output io.Writer = io.Discard + var level log.Level = log.InfoLevel + + if debug { + output = os.Stderr + level = log.DebugLevel + } else if verbose { + output = os.Stderr + level = log.InfoLevel + } + + return log.NewWithOptions(output, log.Options{ + Level: level, + ReportTimestamp: debug, + }) +} diff --git a/nix/packages/pg-startup-profiler/internal/logs/parser.go b/nix/packages/pg-startup-profiler/internal/logs/parser.go new file mode 100644 index 000000000..489c671a4 --- /dev/null +++ b/nix/packages/pg-startup-profiler/internal/logs/parser.go @@ -0,0 +1,45 @@ +package logs + +import ( + "time" + + "github.com/supabase/pg-startup-profiler/internal/rules" +) + +type Event struct { + Name string + Timestamp time.Time + Captures map[string]string + Line string + MarksReady bool +} + +type Parser struct { + rules *rules.Rules +} + +func NewParser(r *rules.Rules) *Parser { + return &Parser{rules: r} +} + +func (p *Parser) ParseLine(line string, fallbackTime time.Time, events chan<- Event) { + match := p.rules.Match(line) + if match != nil { + ts := match.Timestamp + // Use fallback time if no timestamp was parsed from the line + if ts.IsZero() { + ts = fallbackTime + } + events <- Event{ + Name: match.Pattern.Name, + Timestamp: ts, + Captures: match.Captures, + Line: line, + MarksReady: match.Pattern.MarksReady, + } + } +} + +func (p *Parser) Reset() { + p.rules.Reset() +} diff --git a/nix/packages/pg-startup-profiler/internal/logs/parser_test.go b/nix/packages/pg-startup-profiler/internal/logs/parser_test.go new file mode 100644 index 000000000..17ec17535 --- /dev/null +++ b/nix/packages/pg-startup-profiler/internal/logs/parser_test.go @@ -0,0 +1,39 @@ +package logs + +import ( + "testing" + "time" + + "github.com/supabase/pg-startup-profiler/internal/rules" +) + +func TestParser(t 
*testing.T) { + rulesYAML := ` +patterns: + - name: "ready" + regex: 'database system is ready to accept connections' + marks_ready: true + +timestamp: + regex: '(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{3} \w+)' + format: "2006-01-02 15:04:05.000 MST" +` + r, _ := rules.LoadFromYAML([]byte(rulesYAML)) + parser := NewParser(r) + + events := make(chan Event, 10) + fallbackTime := time.Now() + go func() { + parser.ParseLine("2026-01-30 13:18:21.286 UTC [41] LOG: database system is ready to accept connections", fallbackTime, events) + close(events) + }() + + event := <-events + if event.Name != "ready" { + t.Errorf("expected event name 'ready', got '%s'", event.Name) + } + + if event.MarksReady != true { + t.Error("expected event to mark ready") + } +} diff --git a/nix/packages/pg-startup-profiler/internal/report/json.go b/nix/packages/pg-startup-profiler/internal/report/json.go new file mode 100644 index 000000000..66421585f --- /dev/null +++ b/nix/packages/pg-startup-profiler/internal/report/json.go @@ -0,0 +1,95 @@ +package report + +import ( + "encoding/json" + "io" +) + +type JSONReport struct { + Image string `json:"image"` + TotalDurationMs int64 `json:"total_duration_ms"` + Phases []JSONPhase `json:"phases"` + InitScripts []JSONScript `json:"init_scripts"` + Extensions []JSONExtension `json:"extensions"` + BGWorkers []JSONWorker `json:"background_workers"` + Events []JSONEvent `json:"events,omitempty"` +} + +type JSONPhase struct { + Name string `json:"name"` + DurationMs int64 `json:"duration_ms"` + Percent float64 `json:"pct"` +} + +type JSONScript struct { + Path string `json:"path"` + DurationMs int64 `json:"duration_ms"` +} + +type JSONExtension struct { + Name string `json:"name"` + LoadTimeMs int64 `json:"load_time_ms"` +} + +type JSONWorker struct { + Name string `json:"name"` + StartedAtMs int64 `json:"started_at_ms"` +} + +type JSONEvent struct { + Type string `json:"type"` + Name string `json:"name"` + OffsetMs int64 `json:"offset_ms"` + Captures map[string]string `json:"captures,omitempty"` +} + +func PrintJSON(w io.Writer, imageName string, tl *Timeline, verbose bool) error { + report := JSONReport{ + Image: imageName, + TotalDurationMs: tl.TotalDuration.Milliseconds(), + } + + for _, p := range tl.Phases { + report.Phases = append(report.Phases, JSONPhase{ + Name: p.Name, + DurationMs: p.Duration.Milliseconds(), + Percent: p.Percent, + }) + } + + for _, s := range tl.InitScripts { + report.InitScripts = append(report.InitScripts, JSONScript{ + Path: s.Path, + DurationMs: s.Duration.Milliseconds(), + }) + } + + for _, e := range tl.Extensions { + report.Extensions = append(report.Extensions, JSONExtension{ + Name: e.Name, + LoadTimeMs: e.LoadTime.Milliseconds(), + }) + } + + for _, bw := range tl.BGWorkers { + report.BGWorkers = append(report.BGWorkers, JSONWorker{ + Name: bw.Name, + StartedAtMs: bw.StartedAt.Milliseconds(), + }) + } + + if verbose { + for _, e := range tl.Events { + report.Events = append(report.Events, JSONEvent{ + Type: string(e.Type), + Name: e.Name, + OffsetMs: e.Duration.Milliseconds(), + Captures: e.Captures, + }) + } + } + + encoder := json.NewEncoder(w) + encoder.SetIndent("", " ") + return encoder.Encode(report) +} diff --git a/nix/packages/pg-startup-profiler/internal/report/table.go b/nix/packages/pg-startup-profiler/internal/report/table.go new file mode 100644 index 000000000..06957a952 --- /dev/null +++ b/nix/packages/pg-startup-profiler/internal/report/table.go @@ -0,0 +1,116 @@ +package report + +import ( + "fmt" + "io" + "sort" + 
"strings" + "time" +) + +func PrintTable(w io.Writer, imageName string, tl *Timeline) { + fmt.Fprintln(w, strings.Repeat("=", 80)) + fmt.Fprintln(w, "PostgreSQL Container Startup Profile") + fmt.Fprintln(w, strings.Repeat("=", 80)) + fmt.Fprintln(w) + fmt.Fprintf(w, "Image: %s\n", imageName) + fmt.Fprintf(w, "Total: %s\n", formatDuration(tl.TotalDuration)) + fmt.Fprintln(w) + + // Phases + fmt.Fprintln(w, "PHASES") + fmt.Fprintln(w, strings.Repeat("-", 80)) + fmt.Fprintf(w, " %-30s %-12s %-8s\n", "Phase", "Duration", "Pct") + fmt.Fprintln(w, " "+strings.Repeat("-", 50)) + for _, p := range tl.Phases { + fmt.Fprintf(w, " %-30s %-12s %5.1f%%\n", p.Name, formatDuration(p.Duration), p.Percent) + } + fmt.Fprintln(w) + + // Init scripts (top 5) + if len(tl.InitScripts) > 0 { + fmt.Fprintln(w, "INIT SCRIPTS (top 5 by duration)") + fmt.Fprintln(w, strings.Repeat("-", 80)) + + // Sort by duration + sorted := make([]ScriptTiming, len(tl.InitScripts)) + copy(sorted, tl.InitScripts) + sort.Slice(sorted, func(i, j int) bool { + return sorted[i].Duration > sorted[j].Duration + }) + + limit := 5 + if len(sorted) < limit { + limit = len(sorted) + } + + fmt.Fprintf(w, " %-50s %s\n", "Script", "Duration") + fmt.Fprintln(w, " "+strings.Repeat("-", 60)) + for _, s := range sorted[:limit] { + // Truncate path for display + path := s.Path + if len(path) > 48 { + path = "..." + path[len(path)-45:] + } + fmt.Fprintf(w, " %-50s %s\n", path, formatDuration(s.Duration)) + } + fmt.Fprintln(w) + } + + // Extensions + if len(tl.Extensions) > 0 { + fmt.Fprintln(w, "EXTENSIONS") + fmt.Fprintln(w, strings.Repeat("-", 80)) + fmt.Fprintf(w, " %-20s %s\n", "Extension", "Loaded at") + fmt.Fprintln(w, " "+strings.Repeat("-", 30)) + for _, e := range tl.Extensions { + fmt.Fprintf(w, " %-20s %s\n", e.Name, formatDuration(e.LoadTime)) + } + fmt.Fprintln(w) + } + + // Background workers + if len(tl.BGWorkers) > 0 { + fmt.Fprintln(w, "BACKGROUND WORKERS") + fmt.Fprintln(w, strings.Repeat("-", 80)) + fmt.Fprintf(w, " %-20s %s\n", "Worker", "Started at") + fmt.Fprintln(w, " "+strings.Repeat("-", 30)) + for _, bw := range tl.BGWorkers { + fmt.Fprintf(w, " %-20s %s\n", bw.Name, formatDuration(bw.StartedAt)) + } + fmt.Fprintln(w) + } +} + +func PrintTableVerbose(w io.Writer, imageName string, tl *Timeline) { + PrintTable(w, imageName, tl) + + // Event timeline (verbose) + if len(tl.Events) > 0 { + fmt.Fprintln(w, "EVENT TIMELINE") + fmt.Fprintln(w, strings.Repeat("-", 80)) + for _, e := range tl.Events { + fmt.Fprintf(w, " [%s] %-8s %s\n", + formatDuration(e.Duration), + e.Type, + truncate(e.Name, 60)) + } + } +} + +func formatDuration(d time.Duration) string { + if d < time.Millisecond { + return fmt.Sprintf("%.3fms", float64(d.Microseconds())/1000) + } + if d < time.Second { + return fmt.Sprintf("%.0fms", float64(d.Milliseconds())) + } + return fmt.Sprintf("%.3fs", d.Seconds()) +} + +func truncate(s string, maxLen int) string { + if len(s) <= maxLen { + return s + } + return s[:maxLen-3] + "..." 
+}
diff --git a/nix/packages/pg-startup-profiler/internal/report/timeline.go b/nix/packages/pg-startup-profiler/internal/report/timeline.go
new file mode 100644
index 000000000..365cf2e5c
--- /dev/null
+++ b/nix/packages/pg-startup-profiler/internal/report/timeline.go
@@ -0,0 +1,176 @@
+package report
+
+import (
+	"sort"
+	"time"
+)
+
+type EventType string
+
+const (
+	EventTypeDocker EventType = "DOCKER"
+	EventTypeExec   EventType = "EXEC"
+	EventTypeOpen   EventType = "OPEN"
+	EventTypeLog    EventType = "LOG"
+)
+
+type Event struct {
+	Type       EventType
+	Name       string
+	Timestamp  time.Time
+	Duration   time.Duration
+	Details    string
+	Captures   map[string]string
+	MarksReady bool
+}
+
+type Phase struct {
+	Name     string
+	Start    time.Time
+	End      time.Time
+	Duration time.Duration
+	Percent  float64
+}
+
+type Timeline struct {
+	Events        []Event
+	Phases        []Phase
+	TotalDuration time.Duration
+	StartTime     time.Time
+	EndTime       time.Time
+	Extensions    []ExtensionTiming
+	InitScripts   []ScriptTiming
+	BGWorkers     []WorkerTiming
+}
+
+type ExtensionTiming struct {
+	Name     string
+	LoadTime time.Duration
+}
+
+type ScriptTiming struct {
+	Path     string
+	Duration time.Duration
+}
+
+type WorkerTiming struct {
+	Name      string
+	StartedAt time.Duration
+}
+
+func NewTimeline() *Timeline {
+	return &Timeline{
+		Events: make([]Event, 0),
+	}
+}
+
+func (t *Timeline) AddEvent(e Event) {
+	t.Events = append(t.Events, e)
+}
+
+func (t *Timeline) Finalize() {
+	if len(t.Events) == 0 {
+		return
+	}
+
+	// Sort by timestamp
+	sort.Slice(t.Events, func(i, j int) bool {
+		return t.Events[i].Timestamp.Before(t.Events[j].Timestamp)
+	})
+
+	t.StartTime = t.Events[0].Timestamp
+
+	// Find the ready event
+	for _, e := range t.Events {
+		if e.MarksReady {
+			t.EndTime = e.Timestamp
+			break
+		}
+	}
+
+	if t.EndTime.IsZero() {
+		t.EndTime = t.Events[len(t.Events)-1].Timestamp
+	}
+
+	t.TotalDuration = t.EndTime.Sub(t.StartTime)
+
+	// Calculate relative timestamps
+	for i := range t.Events {
+		t.Events[i].Duration = t.Events[i].Timestamp.Sub(t.StartTime)
+	}
+
+	// Extract extension timings
+	t.extractExtensions()
+
+	// Extract init script timings
+	t.extractInitScripts()
+
+	// Extract background worker timings
+	t.extractBGWorkers()
+
+	// Build phases
+	t.buildPhases()
+}
+
+func (t *Timeline) extractExtensions() {
+	for _, e := range t.Events {
+		if e.Name == "extension_load" {
+			if ext, ok := e.Captures["extension"]; ok {
+				t.Extensions = append(t.Extensions, ExtensionTiming{
+					Name:     ext,
+					LoadTime: e.Duration,
+				})
+			}
+		}
+	}
+}
+
+func (t *Timeline) extractInitScripts() {
+	var lastScript string
+	var lastTime time.Time
+
+	for _, e := range t.Events {
+		if e.Name == "migration_file" {
+			if file, ok := e.Captures["file"]; ok {
+				if lastScript != "" {
+					t.InitScripts = append(t.InitScripts, ScriptTiming{
+						Path:     lastScript,
+						Duration: e.Timestamp.Sub(lastTime),
+					})
+				}
+				lastScript = file
+				lastTime = e.Timestamp
+			}
+		}
+	}
+
+	// The final script has no following migration_file event to close it,
+	// so attribute the remaining time up to overall readiness to it.
+	if lastScript != "" {
+		t.InitScripts = append(t.InitScripts, ScriptTiming{
+			Path:     lastScript,
+			Duration: t.EndTime.Sub(lastTime),
+		})
+	}
+}
+
+func (t *Timeline) extractBGWorkers() {
+	for _, e := range t.Events {
+		if e.Name == "bgworker_start" {
+			if worker, ok := e.Captures["worker"]; ok {
+				t.BGWorkers = append(t.BGWorkers, WorkerTiming{
+					Name:      worker,
+					StartedAt: e.Duration,
+				})
+			}
+		}
+	}
+}
+
+func (t *Timeline) buildPhases() {
+	// Simplified phase detection
+	// In practice, would use more sophisticated logic based on events
+	t.Phases = []Phase{
+		{Name: "Total", Duration: t.TotalDuration, Percent: 100.0},
+	}
+}
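buildPhases above is intentionally a stub. One possible elaboration — a sketch only, not part of this change — derives phase boundaries from the first occurrence of the named rule events defined in internal/rules/default.yaml (initdb_start, temp_server_start, server_shutdown, final_server_ready); the method name and phase labels below are hypothetical:

// Sketch: derive phases from named log events, assuming the default rule names.
func (t *Timeline) buildPhasesFromEvents() {
	first := make(map[string]time.Time)
	for _, e := range t.Events { // Events are already sorted by Finalize
		if _, ok := first[e.Name]; !ok {
			first[e.Name] = e.Timestamp
		}
	}
	add := func(name string, from, to time.Time) {
		if from.IsZero() || to.IsZero() || to.Before(from) {
			return // skip phases whose boundary events were never observed
		}
		d := to.Sub(from)
		pct := 0.0
		if t.TotalDuration > 0 {
			pct = float64(d) / float64(t.TotalDuration) * 100
		}
		t.Phases = append(t.Phases, Phase{Name: name, Start: from, End: to, Duration: d, Percent: pct})
	}
	add("initdb", first["initdb_start"], first["temp_server_start"])
	add("init scripts", first["temp_server_start"], first["server_shutdown"])
	add("final startup", first["server_shutdown"], first["final_server_ready"])
}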
diff --git a/nix/packages/pg-startup-profiler/internal/report/timeline_test.go b/nix/packages/pg-startup-profiler/internal/report/timeline_test.go
new file mode 100644
index 000000000..0c29c64c9
--- /dev/null
+++ b/nix/packages/pg-startup-profiler/internal/report/timeline_test.go
@@ -0,0 +1,34 @@
+package report
+
+import (
+	"testing"
+	"time"
+)
+
+func TestTimeline(t *testing.T) {
+	tl := NewTimeline()
+
+	start := time.Now()
+	tl.AddEvent(Event{
+		Type:      EventTypeDocker,
+		Name:      "container_start",
+		Timestamp: start,
+	})
+
+	tl.AddEvent(Event{
+		Type:       EventTypeLog,
+		Name:       "final_server_ready",
+		Timestamp:  start.Add(5 * time.Second),
+		MarksReady: true,
+	})
+
+	tl.Finalize()
+
+	if tl.TotalDuration != 5*time.Second {
+		t.Errorf("expected 5s duration, got %v", tl.TotalDuration)
+	}
+
+	if len(tl.Events) != 2 {
+		t.Errorf("expected 2 events, got %d", len(tl.Events))
+	}
+}
diff --git a/nix/packages/pg-startup-profiler/internal/rules/default.go b/nix/packages/pg-startup-profiler/internal/rules/default.go
new file mode 100644
index 000000000..3ccb0244b
--- /dev/null
+++ b/nix/packages/pg-startup-profiler/internal/rules/default.go
@@ -0,0 +1,8 @@
+package rules
+
+import (
+	_ "embed"
+)
+
+//go:embed default.yaml
+var DefaultRulesYAML []byte
diff --git a/nix/packages/pg-startup-profiler/internal/rules/default.yaml b/nix/packages/pg-startup-profiler/internal/rules/default.yaml
new file mode 100644
index 000000000..b7da63f1c
--- /dev/null
+++ b/nix/packages/pg-startup-profiler/internal/rules/default.yaml
@@ -0,0 +1,35 @@
+# Default rules for PostgreSQL container startup profiling
+patterns:
+  - name: "initdb_start"
+    regex: 'running bootstrap script'
+
+  - name: "initdb_complete"
+    regex: 'syncing data to disk'
+
+  - name: "temp_server_start"
+    regex: 'database system is ready to accept connections'
+    occurrence: 1
+
+  - name: "server_shutdown"
+    regex: 'database system is shut down'
+
+  - name: "final_server_ready"
+    regex: 'database system is ready to accept connections'
+    occurrence: 2
+    marks_ready: true
+
+  - name: "extension_load"
+    regex: 'statement: CREATE EXTENSION.*"(?P<extension>[^"]+)"'
+    capture: extension
+
+  - name: "bgworker_start"
+    regex: '(?P<worker>pg_cron|pg_net).*started'
+    capture: worker
+
+  - name: "migration_file"
+    regex: 'running (?P<file>/docker-entrypoint-initdb\.d/[^\s]+)'
+    capture: file
+
+timestamp:
+  regex: '(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{3} \w+)'
+  format: "2006-01-02 15:04:05.000 MST"
diff --git a/nix/packages/pg-startup-profiler/internal/rules/rules.go b/nix/packages/pg-startup-profiler/internal/rules/rules.go
new file mode 100644
index 000000000..74f70c44d
--- /dev/null
+++ b/nix/packages/pg-startup-profiler/internal/rules/rules.go
@@ -0,0 +1,129 @@
+package rules
+
+import (
+	"regexp"
+	"time"
+
+	"gopkg.in/yaml.v3"
+)
+
+type Pattern struct {
+	Name       string `yaml:"name"`
+	Regex      string `yaml:"regex"`
+	Occurrence int    `yaml:"occurrence,omitempty"`
+	MarksReady bool   `yaml:"marks_ready,omitempty"`
+	Capture    string `yaml:"capture,omitempty"`
+
+	compiled *regexp.Regexp
+	seen     int
+}
+
+type TimestampConfig struct {
+	Regex  string `yaml:"regex"`
+	Format string `yaml:"format"`
+
+	compiled *regexp.Regexp
+}
+
+type Rules struct {
+	Patterns  []*Pattern      `yaml:"patterns"`
+	Timestamp TimestampConfig `yaml:"timestamp"`
+
+	// regexCounts tracks how many times each unique regex has been seen
+	// This allows multiple patterns with the same regex but different occurrence values
+	regexCounts map[string]int
+}
+
+type Match struct {
+	Pattern   *Pattern
+	Timestamp time.Time
+	Captures  map[string]string
+	Line      string
+}
+
+func LoadFromYAML(data []byte) 
(*Rules, error) { + var rules Rules + if err := yaml.Unmarshal(data, &rules); err != nil { + return nil, err + } + + // Initialize regex counts map + rules.regexCounts = make(map[string]int) + + // Compile patterns + for _, p := range rules.Patterns { + compiled, err := regexp.Compile(p.Regex) + if err != nil { + return nil, err + } + p.compiled = compiled + if p.Occurrence == 0 { + p.Occurrence = 1 + } + } + + // Compile timestamp regex + if rules.Timestamp.Regex != "" { + compiled, err := regexp.Compile(rules.Timestamp.Regex) + if err != nil { + return nil, err + } + rules.Timestamp.compiled = compiled + } + + return &rules, nil +} + +func (r *Rules) Match(line string) *Match { + // Track which regexes matched in this line to only increment count once per regex + matchedRegexes := make(map[string]bool) + + for _, p := range r.Patterns { + if p.compiled.MatchString(line) { + // Only increment counter once per unique regex per line + if !matchedRegexes[p.Regex] { + matchedRegexes[p.Regex] = true + r.regexCounts[p.Regex]++ + } + + if r.regexCounts[p.Regex] == p.Occurrence { + match := &Match{ + Pattern: p, + Line: line, + Captures: make(map[string]string), + } + + // Extract timestamp + if r.Timestamp.compiled != nil { + if ts := r.Timestamp.compiled.FindStringSubmatch(line); len(ts) > 1 { + if t, err := time.Parse(r.Timestamp.Format, ts[1]); err == nil { + match.Timestamp = t + } + } + } + + // Extract named captures + if p.Capture != "" { + names := p.compiled.SubexpNames() + matches := p.compiled.FindStringSubmatch(line) + for i, name := range names { + if name != "" && i < len(matches) { + match.Captures[name] = matches[i] + } + } + } + + return match + } + } + } + return nil +} + +func (r *Rules) Reset() { + for _, p := range r.Patterns { + p.seen = 0 + } + // Clear the shared regex counts + r.regexCounts = make(map[string]int) +} diff --git a/nix/packages/pg-startup-profiler/internal/rules/rules_test.go b/nix/packages/pg-startup-profiler/internal/rules/rules_test.go new file mode 100644 index 000000000..ac733003f --- /dev/null +++ b/nix/packages/pg-startup-profiler/internal/rules/rules_test.go @@ -0,0 +1,63 @@ +package rules + +import ( + "testing" +) + +func TestLoadRules(t *testing.T) { + yaml := ` +patterns: + - name: "test_pattern" + regex: 'database system is ready' + marks_ready: true + +timestamp: + regex: '^(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{3} \w+)' + format: "2006-01-02 15:04:05.000 MST" +` + rules, err := LoadFromYAML([]byte(yaml)) + if err != nil { + t.Fatalf("failed to load rules: %v", err) + } + + if len(rules.Patterns) != 1 { + t.Errorf("expected 1 pattern, got %d", len(rules.Patterns)) + } + + if rules.Patterns[0].Name != "test_pattern" { + t.Errorf("expected name 'test_pattern', got '%s'", rules.Patterns[0].Name) + } + + if !rules.Patterns[0].MarksReady { + t.Error("expected marks_ready to be true") + } +} + +func TestPatternMatch(t *testing.T) { + yaml := ` +patterns: + - name: "ready" + regex: 'database system is ready to accept connections' + marks_ready: true + +timestamp: + regex: '(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{3} \w+)' + format: "2006-01-02 15:04:05.000 MST" +` + rules, _ := LoadFromYAML([]byte(yaml)) + + line := "2026-01-30 13:18:21.286 UTC [41] LOG: database system is ready to accept connections" + match := rules.Match(line) + + if match == nil { + t.Fatal("expected match, got nil") + } + + if match.Pattern.Name != "ready" { + t.Errorf("expected pattern 'ready', got '%s'", match.Pattern.Name) + } + + if match.Timestamp.IsZero() { + 
t.Error("expected timestamp to be parsed")
+	}
+}
diff --git a/nix/packages/pg-startup-profiler/rules/default.yaml b/nix/packages/pg-startup-profiler/rules/default.yaml
new file mode 100644
index 000000000..b7da63f1c
--- /dev/null
+++ b/nix/packages/pg-startup-profiler/rules/default.yaml
@@ -0,0 +1,35 @@
+# Default rules for PostgreSQL container startup profiling
+patterns:
+  - name: "initdb_start"
+    regex: 'running bootstrap script'
+
+  - name: "initdb_complete"
+    regex: 'syncing data to disk'
+
+  - name: "temp_server_start"
+    regex: 'database system is ready to accept connections'
+    occurrence: 1
+
+  - name: "server_shutdown"
+    regex: 'database system is shut down'
+
+  - name: "final_server_ready"
+    regex: 'database system is ready to accept connections'
+    occurrence: 2
+    marks_ready: true
+
+  - name: "extension_load"
+    regex: 'statement: CREATE EXTENSION.*"(?P<extension>[^"]+)"'
+    capture: extension
+
+  - name: "bgworker_start"
+    regex: '(?P<worker>pg_cron|pg_net).*started'
+    capture: worker
+
+  - name: "migration_file"
+    regex: 'running (?P<file>/docker-entrypoint-initdb\.d/[^\s]+)'
+    capture: file
+
+timestamp:
+  regex: '(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{3} \w+)'
+  format: "2006-01-02 15:04:05.000 MST"
diff --git a/nix/packages/postgres.nix b/nix/packages/postgres.nix
index ad1779f68..0cfacbab5 100644
--- a/nix/packages/postgres.nix
+++ b/nix/packages/postgres.nix
@@ -3,6 +3,12 @@
   perSystem =
     { pkgs, lib, ... }:
     let
+      # Minimal glibc locales for slim images - only en_US.UTF-8 (~3MB vs ~200MB)
+      glibcLocalesMinimal = pkgs.glibcLocales.override {
+        allLocales = false;
+        locales = [ "en_US.UTF-8/UTF-8" ];
+      };
+
       # Custom extensions that exist in our repository. These aren't upstream
       # either because nobody has done the work, maintaining them here is
       # easier and more expedient, or because they may not be suitable, or are
@@ -55,7 +61,12 @@
       orioledbExtensions = orioleFilteredExtensions ++ [ ../ext/orioledb.nix ];
       dbExtensions17 = orioleFilteredExtensions;
-      getPostgresqlPackage = version: pkgs."postgresql_${version}";
+      getPostgresqlPackage =
+        version: latestOnly:
+        let
+          base = pkgs."postgresql_${version}";
+        in
+        if latestOnly then base.override { systemdSupport = false; } else base;
 
       # Create a 'receipt' file for a given postgresql package. This is a way
       # of adding a bit of metadata to the package, which can be used by other
       # tools to inspect what the contents of the install are: the PSQL
@@ -93,8 +104,11 @@
 
       makeOurPostgresPkgs =
        version:
+        {
+          latestOnly ? false,
+        }:
         let
-          postgresql = getPostgresqlPackage version;
+          postgresql = getPostgresqlPackage version latestOnly;
           extensionsToUse =
             if (builtins.elem version [ "orioledb-17" ]) then
               orioledbExtensions
@@ -105,7 +119,7 @@
           extCallPackage = pkgs.lib.callPackageWith (
             pkgs
             // {
-              inherit postgresql;
+              inherit postgresql latestOnly;
               switch-ext-version = extCallPackage ./switch-ext-version.nix { };
               overlayfs-on-package = extCallPackage ./overlayfs-on-package.nix { };
             }
@@ -116,8 +130,11 @@
       # Create an attrset that contains all the extensions included in a server.
       makeOurPostgresPkgsSet =
         version:
+        {
+          latestOnly ? false,
+        }:
         let
-          pkgsList = makeOurPostgresPkgs version;
+          pkgsList = makeOurPostgresPkgs version { inherit latestOnly; };
           baseAttrs = builtins.listToAttrs (
             map (drv: {
               name = drv.name;
@@ -142,22 +159,31 @@
       # basis for building extensions, etc.
       makePostgresBin =
         version:
+        {
+          latestOnly ? 
false, + }: let - postgresql = getPostgresqlPackage version; - postgres-pkgs = makeOurPostgresPkgs version; + postgresql = getPostgresqlPackage version latestOnly; + postgres-pkgs = makeOurPostgresPkgs version { inherit latestOnly; }; ourExts = map (ext: { name = ext.name; version = ext.version; }) postgres-pkgs; pgbin = postgresql.withPackages (_ps: postgres-pkgs); + + # For slim packages, include minimal glibc locales for initdb locale support + extraPaths = lib.optionals (latestOnly && pkgs.stdenv.isLinux) [ + glibcLocalesMinimal + ]; in pkgs.symlinkJoin { inherit (pgbin) name version; paths = [ pgbin (makeReceipt pgbin ourExts) - ]; + ] + ++ extraPaths; }; # Create an attribute set, containing all the relevant packages for a @@ -171,22 +197,30 @@ # package names. makePostgres = version: + { + latestOnly ? false, + }: lib.recurseIntoAttrs { - bin = makePostgresBin version; - exts = makeOurPostgresPkgsSet version; + bin = makePostgresBin version { inherit latestOnly; }; + exts = makeOurPostgresPkgsSet version { inherit latestOnly; }; }; basePackages = { - psql_15 = makePostgres "15"; - psql_17 = makePostgres "17"; - psql_orioledb-17 = makePostgres "orioledb-17"; + psql_15 = makePostgres "15" { }; + psql_17 = makePostgres "17" { }; + psql_orioledb-17 = makePostgres "orioledb-17" { }; + }; + slimPackages = { + psql_15_slim = makePostgres "15" { latestOnly = true; }; + psql_17_slim = makePostgres "17" { latestOnly = true; }; + psql_orioledb-17_slim = makePostgres "orioledb-17" { latestOnly = true; }; }; binPackages = lib.mapAttrs' (name: value: { name = "${name}/bin"; value = value.bin; - }) basePackages; + }) (basePackages // slimPackages); in { packages = binPackages; - legacyPackages = basePackages; + legacyPackages = basePackages // slimPackages; }; } diff --git a/nix/packages/supabase-cli.nix b/nix/packages/supabase-cli.nix new file mode 100644 index 000000000..a6b460be6 --- /dev/null +++ b/nix/packages/supabase-cli.nix @@ -0,0 +1,50 @@ +{ + lib, + stdenv, + fetchurl, + autoPatchelfHook, +}: +let + version = "2.75.0"; + + sources = { + x86_64-linux = { + url = "https://github.com/supabase/cli/releases/download/v${version}/supabase_linux_amd64.tar.gz"; + hash = "sha256-5Vl0Yvhl1axyrwRTNY437LDYWWKtaRShFKU96EcwO94="; + }; + aarch64-linux = { + url = "https://github.com/supabase/cli/releases/download/v${version}/supabase_linux_arm64.tar.gz"; + hash = "sha256-tVHC+OA3Fb5CjSWSdlIAqeNxFo/DbsJkaoTsxTDTEg4="; + }; + aarch64-darwin = { + url = "https://github.com/supabase/cli/releases/download/v${version}/supabase_darwin_arm64.tar.gz"; + hash = "sha256-ZhhzZIcoep8INcRKcQFpTetMoRkoUqBwqTAUQ3ZBzqM="; + }; + }; + + src = fetchurl { + inherit (sources.${stdenv.hostPlatform.system}) url hash; + }; +in +stdenv.mkDerivation { + pname = "supabase-cli"; + inherit version src; + + sourceRoot = "."; + + nativeBuildInputs = lib.optionals stdenv.hostPlatform.isLinux [ autoPatchelfHook ]; + + installPhase = '' + runHook preInstall + install -Dm755 supabase $out/bin/supabase + runHook postInstall + ''; + + meta = with lib; { + description = "Supabase CLI"; + homepage = "https://github.com/supabase/cli"; + license = licenses.mit; + platforms = builtins.attrNames sources; + mainProgram = "supabase"; + }; +} diff --git a/nix/postgresql/generic.nix b/nix/postgresql/generic.nix index 6eaf873ae..8e648789b 100644 --- a/nix/postgresql/generic.nix +++ b/nix/postgresql/generic.nix @@ -132,7 +132,13 @@ let ++ lib.optionals pythonSupport [ python3 ] ++ lib.optionals gssSupport [ libkrb5 ] ++ lib.optionals stdenv'.isLinux 
[ linux-pam ] - ++ lib.optionals (!stdenv'.isDarwin) [ libossp_uuid ] + ++ lib.optionals (!stdenv'.isDarwin) [ libossp_uuid ]; + + nativeBuildInputs = [ + makeWrapper + pkg-config + ] + # Build tools for PG17+ and OrioleDB - these are NOT runtime dependencies ++ lib.optionals (isOrioleDB || (lib.versionAtLeast version "17")) [ perl bison @@ -141,11 +147,6 @@ let docbook_xml_dtd_45 docbook_xsl_ns libxslt - ]; - - nativeBuildInputs = [ - makeWrapper - pkg-config ] ++ lib.optionals jitSupport [ llvmPackages.llvm.dev diff --git a/nix/tests/expected/http.out b/nix/tests/expected/http.out index d83488006..f3e8d7679 100644 --- a/nix/tests/expected/http.out +++ b/nix/tests/expected/http.out @@ -1,7 +1,7 @@ -- Test for http extension -- Basic HTTP functionality tests -- Test basic HTTP GET request -SELECT status FROM http_get('http://localhost:' || (SELECT value FROM test_config WHERE key = 'http_mock_port') || '/get'); +SELECT status FROM http_get('http://' || (SELECT value FROM test_config WHERE key = 'http_mock_host') || ':' || (SELECT value FROM test_config WHERE key = 'http_mock_port') || '/get'); status -------- 200 @@ -11,7 +11,7 @@ SELECT status FROM http_get('http://localhost:' || (SELECT value FROM test_confi SELECT status, content_type FROM http(( 'GET', - 'http://localhost:' || (SELECT value FROM test_config WHERE key = 'http_mock_port') || '/headers', + 'http://' || (SELECT value FROM test_config WHERE key = 'http_mock_host') || ':' || (SELECT value FROM test_config WHERE key = 'http_mock_port') || '/headers', ARRAY[http_header('User-Agent', 'pg_http_test')], NULL, NULL @@ -23,7 +23,7 @@ FROM http(( -- Test HTTP POST request with JSON body SELECT status FROM http_post( - 'http://localhost:' || (SELECT value FROM test_config WHERE key = 'http_mock_port') || '/post', + 'http://' || (SELECT value FROM test_config WHERE key = 'http_mock_host') || ':' || (SELECT value FROM test_config WHERE key = 'http_mock_port') || '/post', '{"test": "data"}', 'application/json' ); @@ -34,7 +34,7 @@ SELECT status FROM http_post( -- Test HTTP PUT request SELECT status FROM http_put( - 'http://localhost:' || (SELECT value FROM test_config WHERE key = 'http_mock_port') || '/put', + 'http://' || (SELECT value FROM test_config WHERE key = 'http_mock_host') || ':' || (SELECT value FROM test_config WHERE key = 'http_mock_port') || '/put', '{"update": "data"}', 'application/json' ); @@ -44,7 +44,7 @@ SELECT status FROM http_put( (1 row) -- Test HTTP DELETE request -SELECT status FROM http_delete('http://localhost:' || (SELECT value FROM test_config WHERE key = 'http_mock_port') || '/delete'); +SELECT status FROM http_delete('http://' || (SELECT value FROM test_config WHERE key = 'http_mock_host') || ':' || (SELECT value FROM test_config WHERE key = 'http_mock_port') || '/delete'); status -------- 200 @@ -52,7 +52,7 @@ SELECT status FROM http_delete('http://localhost:' || (SELECT value FROM test_co -- Test HTTP PATCH request SELECT status FROM http_patch( - 'http://localhost:' || (SELECT value FROM test_config WHERE key = 'http_mock_port') || '/patch', + 'http://' || (SELECT value FROM test_config WHERE key = 'http_mock_host') || ':' || (SELECT value FROM test_config WHERE key = 'http_mock_port') || '/patch', '{"patch": "data"}', 'application/json' ); @@ -62,7 +62,7 @@ SELECT status FROM http_patch( (1 row) -- Test HTTP HEAD request -SELECT status FROM http_head('http://localhost:' || (SELECT value FROM test_config WHERE key = 'http_mock_port') || '/get'); +SELECT status FROM http_head('http://' || (SELECT value 
FROM test_config WHERE key = 'http_mock_host') || ':' || (SELECT value FROM test_config WHERE key = 'http_mock_port') || '/get'); status -------- 200 @@ -70,7 +70,7 @@ SELECT status FROM http_head('http://localhost:' || (SELECT value FROM test_conf -- Test response headers parsing WITH response AS ( - SELECT * FROM http_get('http://localhost:' || (SELECT value FROM test_config WHERE key = 'http_mock_port') || '/response-headers?Content-Type=text/plain') + SELECT * FROM http_get('http://' || (SELECT value FROM test_config WHERE key = 'http_mock_host') || ':' || (SELECT value FROM test_config WHERE key = 'http_mock_port') || '/response-headers?Content-Type=text/plain') ) SELECT status, @@ -86,7 +86,7 @@ FROM response; -- This should complete successfully with reasonable timeout SELECT status FROM http(( 'GET', - 'http://localhost:' || (SELECT value FROM test_config WHERE key = 'http_mock_port') || '/delay/1', + 'http://' || (SELECT value FROM test_config WHERE key = 'http_mock_host') || ':' || (SELECT value FROM test_config WHERE key = 'http_mock_port') || '/delay/1', ARRAY[]::http_header[], 'application/json', 2000 -- 2 second timeout @@ -97,7 +97,7 @@ SELECT status FROM http(( (1 row) -- Test URL encoding -SELECT status FROM http_get('http://localhost:' || (SELECT value FROM test_config WHERE key = 'http_mock_port') || '/anything?param=value%20with%20spaces&another=123'); +SELECT status FROM http_get('http://' || (SELECT value FROM test_config WHERE key = 'http_mock_host') || ':' || (SELECT value FROM test_config WHERE key = 'http_mock_port') || '/anything?param=value%20with%20spaces&another=123'); status -------- 200 diff --git a/nix/tests/sql/http.sql b/nix/tests/sql/http.sql index df80feb52..34d21b48e 100644 --- a/nix/tests/sql/http.sql +++ b/nix/tests/sql/http.sql @@ -2,13 +2,13 @@ -- Basic HTTP functionality tests -- Test basic HTTP GET request -SELECT status FROM http_get('http://localhost:' || (SELECT value FROM test_config WHERE key = 'http_mock_port') || '/get'); +SELECT status FROM http_get('http://' || (SELECT value FROM test_config WHERE key = 'http_mock_host') || ':' || (SELECT value FROM test_config WHERE key = 'http_mock_port') || '/get'); -- Test HTTP GET with headers SELECT status, content_type FROM http(( 'GET', - 'http://localhost:' || (SELECT value FROM test_config WHERE key = 'http_mock_port') || '/headers', + 'http://' || (SELECT value FROM test_config WHERE key = 'http_mock_host') || ':' || (SELECT value FROM test_config WHERE key = 'http_mock_port') || '/headers', ARRAY[http_header('User-Agent', 'pg_http_test')], NULL, NULL @@ -16,34 +16,34 @@ FROM http(( -- Test HTTP POST request with JSON body SELECT status FROM http_post( - 'http://localhost:' || (SELECT value FROM test_config WHERE key = 'http_mock_port') || '/post', + 'http://' || (SELECT value FROM test_config WHERE key = 'http_mock_host') || ':' || (SELECT value FROM test_config WHERE key = 'http_mock_port') || '/post', '{"test": "data"}', 'application/json' ); -- Test HTTP PUT request SELECT status FROM http_put( - 'http://localhost:' || (SELECT value FROM test_config WHERE key = 'http_mock_port') || '/put', + 'http://' || (SELECT value FROM test_config WHERE key = 'http_mock_host') || ':' || (SELECT value FROM test_config WHERE key = 'http_mock_port') || '/put', '{"update": "data"}', 'application/json' ); -- Test HTTP DELETE request -SELECT status FROM http_delete('http://localhost:' || (SELECT value FROM test_config WHERE key = 'http_mock_port') || '/delete'); +SELECT status FROM http_delete('http://' || 
(SELECT value FROM test_config WHERE key = 'http_mock_host') || ':' || (SELECT value FROM test_config WHERE key = 'http_mock_port') || '/delete'); -- Test HTTP PATCH request SELECT status FROM http_patch( - 'http://localhost:' || (SELECT value FROM test_config WHERE key = 'http_mock_port') || '/patch', + 'http://' || (SELECT value FROM test_config WHERE key = 'http_mock_host') || ':' || (SELECT value FROM test_config WHERE key = 'http_mock_port') || '/patch', '{"patch": "data"}', 'application/json' ); -- Test HTTP HEAD request -SELECT status FROM http_head('http://localhost:' || (SELECT value FROM test_config WHERE key = 'http_mock_port') || '/get'); +SELECT status FROM http_head('http://' || (SELECT value FROM test_config WHERE key = 'http_mock_host') || ':' || (SELECT value FROM test_config WHERE key = 'http_mock_port') || '/get'); -- Test response headers parsing WITH response AS ( - SELECT * FROM http_get('http://localhost:' || (SELECT value FROM test_config WHERE key = 'http_mock_port') || '/response-headers?Content-Type=text/plain') + SELECT * FROM http_get('http://' || (SELECT value FROM test_config WHERE key = 'http_mock_host') || ':' || (SELECT value FROM test_config WHERE key = 'http_mock_port') || '/response-headers?Content-Type=text/plain') ) SELECT status, @@ -55,11 +55,11 @@ FROM response; -- This should complete successfully with reasonable timeout SELECT status FROM http(( 'GET', - 'http://localhost:' || (SELECT value FROM test_config WHERE key = 'http_mock_port') || '/delay/1', + 'http://' || (SELECT value FROM test_config WHERE key = 'http_mock_host') || ':' || (SELECT value FROM test_config WHERE key = 'http_mock_port') || '/delay/1', ARRAY[]::http_header[], 'application/json', 2000 -- 2 second timeout )::http_request); -- Test URL encoding -SELECT status FROM http_get('http://localhost:' || (SELECT value FROM test_config WHERE key = 'http_mock_port') || '/anything?param=value%20with%20spaces&another=123'); +SELECT status FROM http_get('http://' || (SELECT value FROM test_config WHERE key = 'http_mock_host') || ':' || (SELECT value FROM test_config WHERE key = 'http_mock_port') || '/anything?param=value%20with%20spaces&another=123'); diff --git a/test-docker-image.sh b/test-docker-image.sh index 5204d7931..0258ec246 100755 --- a/test-docker-image.sh +++ b/test-docker-image.sh @@ -24,6 +24,7 @@ POSTGRES_DB="postgres" POSTGRES_PASSWORD="postgres" OUTPUT_DIR="" HTTP_MOCK_PORT="" +HTTP_MOCK_PID="" # Colors for output RED='\033[0;31m' @@ -167,10 +168,15 @@ get_test_list() { # Cleanup function cleanup() { # since this function is set as the trap for EXIT - # store the return code of the last command that + # store the return code of the last command that # was executed before said EXIT local exit_code=$? + # Kill HTTP mock server if running on host + if [[ -n "$HTTP_MOCK_PID" ]]; then + kill "$HTTP_MOCK_PID" 2>/dev/null || true + fi + if [[ -n "$CONTAINER_NAME" ]] && [[ "$KEEP_CONTAINER" != "true" ]]; then log_info "Cleaning up container $CONTAINER_NAME..." docker rm -f "$CONTAINER_NAME" 2>/dev/null || true @@ -190,6 +196,7 @@ cleanup() { trap cleanup EXIT # Wait for postgres to be ready +# Requires PG_ISREADY_PATH to be set to the pg_isready binary wait_for_postgres() { local host="$1" local port="$2" @@ -199,7 +206,7 @@ wait_for_postgres() { log_info "Waiting for PostgreSQL to be ready..." 
 
   while [[ $attempt -le $max_attempts ]]; do
-    if pg_isready -h "$host" -p "$port" -U "$POSTGRES_USER" -q 2>/dev/null; then
+    if "$PG_ISREADY_PATH" -h "$host" -p "$port" -U "$POSTGRES_USER" -q 2>/dev/null; then
       log_info "PostgreSQL is ready"
       return 0
     fi
@@ -280,22 +287,7 @@ main() {
     fi
   fi
 
-  # Start container
-  log_info "Starting container $CONTAINER_NAME..."
-  docker run -d \
-    --name "$CONTAINER_NAME" \
-    -e POSTGRES_PASSWORD="$POSTGRES_PASSWORD" \
-    -p "$PORT:5432" \
-    "$IMAGE_TAG"
-
-  # Wait for postgres
-  if ! wait_for_postgres "localhost" "$PORT"; then
-    log_error "Container logs:"
-    docker logs "$CONTAINER_NAME"
-    exit 1
-  fi
-
-  # Get psql and pg_regress from Nix
+  # Get psql, pg_isready, and pg_regress from Nix (needed before starting container)
   log_info "Setting up Nix environment..."
 
   # Determine psql binary path based on version
@@ -307,7 +299,10 @@ main() {
   esac
 
   # Build the required Nix packages
-  PSQL_PATH=$(nix build --no-link --print-out-paths ".#${nix_psql_attr}")/bin/psql
+  local nix_bin_path
+  nix_bin_path=$(nix build --no-link --print-out-paths ".#${nix_psql_attr}")/bin
+  PSQL_PATH="${nix_bin_path}/psql"
+  PG_ISREADY_PATH="${nix_bin_path}/pg_isready"
   PG_REGRESS_PATH=$(nix build --no-link --print-out-paths ".#pg_regress")/bin/pg_regress
 
   if [[ ! -x "$PSQL_PATH" ]]; then
@@ -315,27 +310,59 @@ main() {
     exit 1
   fi
 
+  if [[ ! -x "$PG_ISREADY_PATH" ]]; then
+    log_error "Failed to get pg_isready from Nix"
+    exit 1
+  fi
+
   if [[ ! -x "$PG_REGRESS_PATH" ]]; then
     log_error "Failed to get pg_regress from Nix"
    exit 1
   fi
 
   log_info "Using psql: $PSQL_PATH"
+  log_info "Using pg_isready: $PG_ISREADY_PATH"
   log_info "Using pg_regress: $PG_REGRESS_PATH"
 
-  # Start HTTP mock server inside the container
-  log_info "Starting HTTP mock server inside container..."
+  # Start container
+  log_info "Starting container $CONTAINER_NAME..."
+  docker run -d \
+    --name "$CONTAINER_NAME" \
+    -e POSTGRES_PASSWORD="$POSTGRES_PASSWORD" \
+    -p "$PORT:5432" \
+    "$IMAGE_TAG"
 
-  # Copy mock server script into container
-  docker cp "$HTTP_MOCK_SERVER" "$CONTAINER_NAME:/tmp/http-mock-server.py"
+  # Wait for postgres
+  if ! wait_for_postgres "localhost" "$PORT"; then
+    log_error "Container logs:"
+    docker logs "$CONTAINER_NAME"
+    exit 1
+  fi
 
-  # Start mock server in container background
+  # Start HTTP mock server on host (reachable from the container via host.docker.internal or the bridge gateway)
+  log_info "Starting HTTP mock server on host..."
   HTTP_MOCK_PORT=8880
-  docker exec -d "$CONTAINER_NAME" python3 /tmp/http-mock-server.py $HTTP_MOCK_PORT
+
+  # Start mock server on host in background
+  python3 "$HTTP_MOCK_SERVER" "$HTTP_MOCK_PORT" &
+  HTTP_MOCK_PID=$!
 
   # Wait for mock server to be ready
   sleep 2
-  log_info "HTTP mock server started on port $HTTP_MOCK_PORT (inside container)"
+  if ! kill -0 "$HTTP_MOCK_PID" 2>/dev/null; then
+    log_error "HTTP mock server failed to start"
+    exit 1
+  fi
+  log_info "HTTP mock server started on host port $HTTP_MOCK_PORT (PID: $HTTP_MOCK_PID)"
+
+  # Determine host address accessible from container
+  # On Docker Desktop (macOS/Windows): host.docker.internal
+  # On Linux: use the gateway IP from docker network
+  HTTP_MOCK_HOST="host.docker.internal"
+  if [[ "$(uname)" == "Linux" ]]; then
+    HTTP_MOCK_HOST=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.Gateway}}{{end}}' "$CONTAINER_NAME")
+  fi
+  log_info "Container will access mock server at $HTTP_MOCK_HOST:$HTTP_MOCK_PORT"
 
   # Run prime.sql to enable extensions
   log_info "Running prime.sql to enable extensions..."
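The reordering above is what makes the host-side mock server work: the container is started before the mock server so its network is known, and the address the container uses to reach the host differs by platform (Docker Desktop resolves host.docker.internal; native Linux containers must dial the bridge gateway instead). A minimal standalone sketch of the same detection, assuming a running container named pg_test, a host-side listener on port 8880, and curl available inside the image (all placeholder assumptions, not part of the patch):

    # Resolve an address the container can use to reach the host
    MOCK_HOST="host.docker.internal"   # resolvable on Docker Desktop (macOS/Windows)
    if [[ "$(uname)" == "Linux" ]]; then
      # Native Linux: use the container's bridge gateway (typically 172.17.0.1)
      MOCK_HOST=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.Gateway}}{{end}}' pg_test)
    fi
    # Verify the container can actually reach the host-side server
    docker exec pg_test curl -fsS "http://${MOCK_HOST}:8880/get" >/dev/null \
      && echo "container -> host connectivity OK"

Note that the gateway route only works when the host-side server binds 0.0.0.0 (or the bridge interface) rather than 127.0.0.1 alone.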
@@ -351,7 +378,7 @@ main() {
     exit 1
   fi
 
-  # Create test_config table with HTTP mock port
+  # Create test_config table with HTTP mock host and port
   log_info "Creating test_config table..."
   PGPASSWORD="$POSTGRES_PASSWORD" "$PSQL_PATH" \
     -h localhost \
@@ -360,6 +387,8 @@ main() {
     -d "$POSTGRES_DB" \
     -c "CREATE TABLE IF NOT EXISTS test_config (key TEXT PRIMARY KEY, value TEXT);
         INSERT INTO test_config (key, value) VALUES ('http_mock_port', '$HTTP_MOCK_PORT')
+        ON CONFLICT (key) DO UPDATE SET value = EXCLUDED.value;
+        INSERT INTO test_config (key, value) VALUES ('http_mock_host', '$HTTP_MOCK_HOST')
         ON CONFLICT (key) DO UPDATE SET value = EXCLUDED.value;"
 
   # Get filtered test list
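With both keys seeded, the SQL tests assemble mock URLs entirely from test_config, so the same test files run unchanged whether the mock server lives in the container or on the host. As a quick end-to-end sanity check of the seeded values, a sketch that assumes the script's $PSQL_PATH, $PORT, and $POSTGRES_* variables are still in scope:

    # Print the fully assembled base URL the tests will request
    PGPASSWORD="$POSTGRES_PASSWORD" "$PSQL_PATH" \
      -h localhost -p "$PORT" -U "$POSTGRES_USER" -d "$POSTGRES_DB" -tA \
      -c "SELECT 'http://'
            || (SELECT value FROM test_config WHERE key = 'http_mock_host')
            || ':'
            || (SELECT value FROM test_config WHERE key = 'http_mock_port')
            || '/get';"

If this prints something like http://172.17.0.1:8880/get on Linux (or http://host.docker.internal:8880/get on Docker Desktop), the http tests have everything they need.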