diff --git a/.coderabbit.yaml b/.coderabbit.yaml deleted file mode 100644 index 2220649caa28..000000000000 --- a/.coderabbit.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# yaml-language-server: $schema=https://coderabbit.ai/integrations/schema.v2.json -# Disable CodeRabbit auto-review to prevent verbose comments on PRs. -# When enabled: false, CodeRabbit won't attempt reviews and won't post -# "Review skipped" or other automated comments. -reviews: - auto_review: - enabled: false - review_status: false - high_level_summary: false - poem: false - sequence_diagrams: false - changed_files_summary: false - tools: - github-checks: - enabled: false -chat: - art: false - auto_reply: false diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS deleted file mode 100644 index 763c5f27ee6b..000000000000 --- a/.github/CODEOWNERS +++ /dev/null @@ -1,17 +0,0 @@ -# Pull requests concerning the listed files will automatically invite the respective maintainers as reviewers. -# This file is not used for denoting any kind of ownership, but is merely a tool for handling notifications. -# -# Merge permissions are required for maintaining an entry in this file. 
-# For documentation on this mechanism, see https://help.github.com/articles/about-codeowners/ - -# Default reviewers if nothing else matches -* @edolstra - -# This file -.github/CODEOWNERS @edolstra - -# Documentation of built-in functions -src/libexpr/primops.cc @roberth - -# Libstore layer -/src/libstore @ericson2314 diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index af94c3e9e5bb..08a5851748d4 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -1,10 +1,9 @@ --- name: Bug report about: Report unexpected or incorrect behaviour -title: '' +title: "" labels: bug -assignees: '' - +assignees: "" --- ## Describe the bug @@ -32,7 +31,9 @@ assignees: '' ## Metadata - + + + ## Additional context @@ -42,13 +43,9 @@ assignees: '' -- [ ] checked [latest Nix manual] \([source]) +- [ ] checked [latest Determinate Nix manual] \([source]) - [ ] checked [open bug issues and pull requests] for possible duplicates -[latest Nix manual]: https://nix.dev/manual/nix/development/ -[source]: https://github.com/NixOS/nix/tree/master/doc/manual/source -[open bug issues and pull requests]: https://github.com/NixOS/nix/labels/bug - ---- - -Add :+1: to [issues you find important](https://github.com/NixOS/nix/issues?q=is%3Aissue+is%3Aopen+sort%3Areactions-%2B1-desc). 
+[latest Determinate Nix manual]: https://manual.determinate.systems/ +[source]: https://github.com/DeterminateSystems/nix-src/tree/main/doc/manual/source +[open bug issues and pull requests]: https://github.com/DeterminateSystems/nix-src/labels/bug diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md index fe9f9dd209d4..b88e10937988 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -1,10 +1,9 @@ --- name: Feature request about: Suggest a new feature -title: '' +title: "" labels: feature -assignees: '' - +assignees: "" --- ## Is your feature request related to a problem? @@ -27,13 +26,9 @@ assignees: '' -- [ ] checked [latest Nix manual] \([source]) -- [ ] checked [open feature issues and pull requests] for possible duplicates - -[latest Nix manual]: https://nix.dev/manual/nix/development/ -[source]: https://github.com/NixOS/nix/tree/master/doc/manual/source -[open feature issues and pull requests]: https://github.com/NixOS/nix/labels/feature - ---- +- [ ] checked [latest Determinate Nix manual] \([source]) +- [ ] checked [open bug issues and pull requests] for possible duplicates -Add :+1: to [issues you find important](https://github.com/NixOS/nix/issues?q=is%3Aissue+is%3Aopen+sort%3Areactions-%2B1-desc). 
+[latest Determinate Nix manual]: https://manual.determinate.systems/ +[source]: https://github.com/DeterminateSystems/nix-src/tree/main/doc/manual/source +[open bug issues and pull requests]: https://github.com/DeterminateSystems/nix-src/labels/bug diff --git a/.github/ISSUE_TEMPLATE/installer.md b/.github/ISSUE_TEMPLATE/installer.md index 070e0bd9b25b..430bef971aac 100644 --- a/.github/ISSUE_TEMPLATE/installer.md +++ b/.github/ISSUE_TEMPLATE/installer.md @@ -1,18 +1,17 @@ --- name: Installer issue about: Report problems with installation -title: '' +title: "" labels: installer -assignees: '' - +assignees: "" --- ## Platform - + -- [ ] Linux: - [ ] macOS +- [ ] Linux: - [ ] WSL ## Additional information @@ -35,13 +34,9 @@ assignees: '' -- [ ] checked [latest Nix manual] \([source]) -- [ ] checked [open installer issues and pull requests] for possible duplicates - -[latest Nix manual]: https://nix.dev/manual/nix/development/ -[source]: https://github.com/NixOS/nix/tree/master/doc/manual/source -[open installer issues and pull requests]: https://github.com/NixOS/nix/labels/installer - ---- +- [ ] checked [latest Determinate Nix manual] \([source]) +- [ ] checked [open bug issues and pull requests] for possible duplicates -Add :+1: to [issues you find important](https://github.com/NixOS/nix/issues?q=is%3Aissue+is%3Aopen+sort%3Areactions-%2B1-desc). 
+[latest Determinate Nix manual]: https://manual.determinate.systems/ +[source]: https://github.com/DeterminateSystems/nix-src/tree/main/doc/manual/source +[open bug issues and pull requests]: https://github.com/DeterminateSystems/nix-src/labels/bug diff --git a/.github/ISSUE_TEMPLATE/missing_documentation.md b/.github/ISSUE_TEMPLATE/missing_documentation.md index 4e05b626d398..fcdd0d20135e 100644 --- a/.github/ISSUE_TEMPLATE/missing_documentation.md +++ b/.github/ISSUE_TEMPLATE/missing_documentation.md @@ -1,10 +1,9 @@ --- name: Missing or incorrect documentation about: Help us improve the reference manual -title: '' +title: "" labels: documentation -assignees: '' - +assignees: "" --- ## Problem @@ -19,13 +18,9 @@ assignees: '' -- [ ] checked [latest Nix manual] \([source]) -- [ ] checked [open documentation issues and pull requests] for possible duplicates - -[latest Nix manual]: https://nix.dev/manual/nix/development/ -[source]: https://github.com/NixOS/nix/tree/master/doc/manual/source -[open documentation issues and pull requests]: https://github.com/NixOS/nix/labels/documentation - ---- +- [ ] checked [latest Determinate Nix manual] \([source]) +- [ ] checked [open bug issues and pull requests] for possible duplicates -Add :+1: to [issues you find important](https://github.com/NixOS/nix/issues?q=is%3Aissue+is%3Aopen+sort%3Areactions-%2B1-desc). +[latest Determinate Nix manual]: https://manual.determinate.systems/ +[source]: https://github.com/DeterminateSystems/nix-src/tree/main/doc/manual/source +[open bug issues and pull requests]: https://github.com/DeterminateSystems/nix-src/labels/bug diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index c155bf8bfa4f..d3e1f8177364 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,26 +1,3 @@ - - ## Motivation @@ -34,9 +11,3 @@ PR stuck in review? 
We have two Nix team meetings per week online that are open - ---- - -Add :+1: to [pull requests you find important](https://github.com/NixOS/nix/pulls?q=is%3Aopen+sort%3Areactions-%2B1-desc). - -The Nix maintainer team uses a [GitHub project board](https://github.com/orgs/NixOS/projects/19) to [schedule and track reviews](https://github.com/NixOS/nix/tree/master/maintainers#project-board-protocol). diff --git a/.github/STALE-BOT.md b/.github/STALE-BOT.md index bc0005413f1a..281d0f79a8b7 100644 --- a/.github/STALE-BOT.md +++ b/.github/STALE-BOT.md @@ -2,34 +2,21 @@ - Thanks for your contribution! - To remove the stale label, just leave a new comment. -- _How to find the right people to ping?_ → [`git blame`](https://git-scm.com/docs/git-blame) to the rescue! (or GitHub's history and blame buttons.) -- You can always ask for help on [our Discourse Forum](https://discourse.nixos.org/) or on [Matrix - #users:nixos.org](https://matrix.to/#/#users:nixos.org). +- You can always ask for help on [Discord](https://determinate.systems/discord). ## Suggestions for PRs -1. GitHub sometimes doesn't notify people who commented / reviewed a PR previously, when you (force) push commits. If you have addressed the reviews you can [officially ask for a review](https://docs.github.com/en/free-pro-team@latest/github/collaborating-with-issues-and-pull-requests/requesting-a-pull-request-review) from those who commented to you or anyone else. -2. If it is unfinished but you plan to finish it, please mark it as a draft. -3. If you don't expect to work on it any time soon, closing it with a short comment may encourage someone else to pick up your work. -4. To get things rolling again, rebase the PR against the target branch and address valid comments. -5. If you need a review to move forward, ask in [the Discourse thread for PRs that need help](https://discourse.nixos.org/t/prs-in-distress/3604). -6. 
If all you need is a merge, check the git history to find and [request reviews](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/requesting-a-pull-request-review) from people who usually merge related contributions. +1. If it is unfinished but you plan to finish it, please mark it as a draft. +1. If you don't expect to work on it any time soon, closing it with a short comment may encourage someone else to pick up your work. +1. To get things rolling again, rebase the PR against the target branch and address valid comments. +1. If you need a review to move forward, ask in [Discord](https://determinate.systems/discord). ## Suggestions for issues 1. If it is resolved (either for you personally, or in general), please consider closing it. 2. If this might still be an issue, but you are not interested in promoting its resolution, please consider closing it while encouraging others to take over and reopen an issue if they care enough. -3. If you still have interest in resolving it, try to ping somebody who you believe might have an interest in the topic. Consider discussing the problem in [our Discourse Forum](https://discourse.nixos.org/). -4. As with all open source projects, your best option is to submit a Pull Request that addresses this issue. We :heart: this attitude! +3. If you still have interest in resolving it, try to ping somebody who you believe might have an interest in the topic. Consider discussing the problem in [Discord](https://determinate.systems/discord). **Memorandum on closing issues** Don't be afraid to close an issue that holds valuable information. Closed issues stay in the system for people to search, read, cross-reference, or even reopen--nothing is lost! Closing obsolete issues is an important way to help maintainers focus their time and effort. 
- -## Useful GitHub search queries - -- [Open PRs with any stale-bot interaction](https://github.com/NixOS/nix/pulls?q=is%3Apr+is%3Aopen+commenter%3Aapp%2Fstale+) -- [Open PRs with any stale-bot interaction and `stale`](https://github.com/NixOS/nix/pulls?q=is%3Apr+is%3Aopen+commenter%3Aapp%2Fstale+label%3A%22stale%22) -- [Open PRs with any stale-bot interaction and NOT `stale`](https://github.com/NixOS/nix/pulls?q=is%3Apr+is%3Aopen+commenter%3Aapp%2Fstale+-label%3A%22stale%22+) -- [Open Issues with any stale-bot interaction](https://github.com/NixOS/nix/issues?q=is%3Aissue+is%3Aopen+commenter%3Aapp%2Fstale+) -- [Open Issues with any stale-bot interaction and `stale`](https://github.com/NixOS/nix/issues?q=is%3Aissue+is%3Aopen+commenter%3Aapp%2Fstale+label%3A%22stale%22+) -- [Open Issues with any stale-bot interaction and NOT `stale`](https://github.com/NixOS/nix/issues?q=is%3Aissue+is%3Aopen+commenter%3Aapp%2Fstale+-label%3A%22stale%22+) diff --git a/.github/actions/install-nix-action/action.yaml b/.github/actions/install-nix-action/action.yaml deleted file mode 100644 index 535ae9d08fd9..000000000000 --- a/.github/actions/install-nix-action/action.yaml +++ /dev/null @@ -1,124 +0,0 @@ -name: "Install Nix" -description: "Helper action for installing Nix with support for dogfooding from master" -inputs: - dogfood: - description: "Whether to use Nix installed from the latest artifact from master branch" - required: true # Be explicit about the fact that we are using unreleased artifacts - experimental-installer: - description: "Whether to use the experimental installer to install Nix" - default: false - experimental-installer-version: - description: "Version of the experimental installer to use. If `latest`, the newest artifact from the default branch is used." 
- # TODO: This should probably be pinned to a release after https://github.com/NixOS/experimental-nix-installer/pull/49 lands in one - default: "latest" - extra_nix_config: - description: "Gets appended to `/etc/nix/nix.conf` if passed." - install_url: - description: "URL of the Nix installer" - required: false - default: "https://releases.nixos.org/nix/nix-2.32.1/install" - tarball_url: - description: "URL of the Nix tarball to use with the experimental installer" - required: false - github_token: - description: "Github token" - required: true - use_cache: - description: "Whether to setup github actions cache (not implemented currently)" - default: false - required: false -runs: - using: "composite" - steps: - - name: "Download nix install artifact from master" - shell: bash - id: download-nix-installer - if: inputs.dogfood == 'true' - run: | - RUN_ID=$(gh run list --repo "$DOGFOOD_REPO" --workflow ci.yml --branch master --status success --json databaseId --jq ".[0].databaseId") - - if [ "$RUNNER_OS" == "Linux" ]; then - INSTALLER_ARTIFACT="installer-linux" - elif [ "$RUNNER_OS" == "macOS" ]; then - INSTALLER_ARTIFACT="installer-darwin" - else - echo "::error ::Unsupported RUNNER_OS: $RUNNER_OS" - exit 1 - fi - - INSTALLER_DOWNLOAD_DIR="$GITHUB_WORKSPACE/$INSTALLER_ARTIFACT" - mkdir -p "$INSTALLER_DOWNLOAD_DIR" - - gh run download "$RUN_ID" --repo "$DOGFOOD_REPO" -n "$INSTALLER_ARTIFACT" -D "$INSTALLER_DOWNLOAD_DIR" - echo "installer-path=file://$INSTALLER_DOWNLOAD_DIR" >> "$GITHUB_OUTPUT" - TARBALL_PATH="$(find "$INSTALLER_DOWNLOAD_DIR" -name 'nix*.tar.xz' -print | head -n 1)" - echo "tarball-path=file://$TARBALL_PATH" >> "$GITHUB_OUTPUT" - - echo "::notice ::Dogfooding Nix installer from master (https://github.com/$DOGFOOD_REPO/actions/runs/$RUN_ID)" - env: - GH_TOKEN: ${{ inputs.github_token }} - DOGFOOD_REPO: "NixOS/nix" - - name: "Gather system info for experimental installer" - shell: bash - if: ${{ inputs.experimental-installer == 'true' }} - run: | - echo 
"::notice Using experimental installer from $EXPERIMENTAL_INSTALLER_REPO (https://github.com/$EXPERIMENTAL_INSTALLER_REPO)" - - if [ "$RUNNER_OS" == "Linux" ]; then - EXPERIMENTAL_INSTALLER_SYSTEM="linux" - echo "EXPERIMENTAL_INSTALLER_SYSTEM=$EXPERIMENTAL_INSTALLER_SYSTEM" >> "$GITHUB_ENV" - elif [ "$RUNNER_OS" == "macOS" ]; then - EXPERIMENTAL_INSTALLER_SYSTEM="darwin" - echo "EXPERIMENTAL_INSTALLER_SYSTEM=$EXPERIMENTAL_INSTALLER_SYSTEM" >> "$GITHUB_ENV" - else - echo "::error ::Unsupported RUNNER_OS: $RUNNER_OS" - exit 1 - fi - - if [ "$RUNNER_ARCH" == "X64" ]; then - EXPERIMENTAL_INSTALLER_ARCH=x86_64 - echo "EXPERIMENTAL_INSTALLER_ARCH=$EXPERIMENTAL_INSTALLER_ARCH" >> "$GITHUB_ENV" - elif [ "$RUNNER_ARCH" == "ARM64" ]; then - EXPERIMENTAL_INSTALLER_ARCH=aarch64 - echo "EXPERIMENTAL_INSTALLER_ARCH=$EXPERIMENTAL_INSTALLER_ARCH" >> "$GITHUB_ENV" - else - echo "::error ::Unsupported RUNNER_ARCH: $RUNNER_ARCH" - exit 1 - fi - - echo "EXPERIMENTAL_INSTALLER_ARTIFACT=nix-installer-$EXPERIMENTAL_INSTALLER_ARCH-$EXPERIMENTAL_INSTALLER_SYSTEM" >> "$GITHUB_ENV" - env: - EXPERIMENTAL_INSTALLER_REPO: "NixOS/experimental-nix-installer" - - name: "Download latest experimental installer" - shell: bash - id: download-latest-experimental-installer - if: ${{ inputs.experimental-installer == 'true' && inputs.experimental-installer-version == 'latest' }} - run: | - RUN_ID=$(gh run list --repo "$EXPERIMENTAL_INSTALLER_REPO" --workflow ci.yml --branch main --status success --json databaseId --jq ".[0].databaseId") - - EXPERIMENTAL_INSTALLER_DOWNLOAD_DIR="$GITHUB_WORKSPACE/$EXPERIMENTAL_INSTALLER_ARTIFACT" - mkdir -p "$EXPERIMENTAL_INSTALLER_DOWNLOAD_DIR" - - gh run download "$RUN_ID" --repo "$EXPERIMENTAL_INSTALLER_REPO" -n "$EXPERIMENTAL_INSTALLER_ARTIFACT" -D "$EXPERIMENTAL_INSTALLER_DOWNLOAD_DIR" - # Executable permissions are lost in artifacts - find $EXPERIMENTAL_INSTALLER_DOWNLOAD_DIR -type f -exec chmod +x {} + - echo "installer-path=$EXPERIMENTAL_INSTALLER_DOWNLOAD_DIR" >> 
"$GITHUB_OUTPUT" - env: - GH_TOKEN: ${{ inputs.github_token }} - EXPERIMENTAL_INSTALLER_REPO: "NixOS/experimental-nix-installer" - - uses: cachix/install-nix-action@c134e4c9e34bac6cab09cf239815f9339aaaf84e # v31.5.1 - if: ${{ inputs.experimental-installer != 'true' }} - with: - # Ternary operator in GHA: https://www.github.com/actions/runner/issues/409#issuecomment-752775072 - install_url: ${{ inputs.dogfood == 'true' && format('{0}/install', steps.download-nix-installer.outputs.installer-path) || inputs.install_url }} - install_options: ${{ inputs.dogfood == 'true' && format('--tarball-url-prefix {0}', steps.download-nix-installer.outputs.installer-path) || '' }} - extra_nix_config: ${{ inputs.extra_nix_config }} - - uses: DeterminateSystems/nix-installer-action@786fff0690178f1234e4e1fe9b536e94f5433196 # v20 - if: ${{ inputs.experimental-installer == 'true' }} - with: - diagnostic-endpoint: "" - # TODO: It'd be nice to use `artifacts.nixos.org` for both of these, maybe through an `/experimental-installer/latest` endpoint? or `/commit/`? - local-root: ${{ inputs.experimental-installer-version == 'latest' && steps.download-latest-experimental-installer.outputs.installer-path || '' }} - source-url: ${{ inputs.experimental-installer-version != 'latest' && 'https://artifacts.nixos.org/experimental-installer/tag/${{ inputs.experimental-installer-version }}/${{ env.EXPERIMENTAL_INSTALLER_ARTIFACT }}' || '' }} - nix-package-url: ${{ inputs.dogfood == 'true' && steps.download-nix-installer.outputs.tarball-path || (inputs.tarball_url || '') }} - extra-conf: ${{ inputs.extra_nix_config }} diff --git a/.github/release-notes.sh b/.github/release-notes.sh new file mode 100755 index 000000000000..f641e146d2e8 --- /dev/null +++ b/.github/release-notes.sh @@ -0,0 +1,69 @@ +#!/usr/bin/env bash + +# SC2002 disables "useless cat" warnings. +# I prefer pipelines that start with an explicit input, and go from there. +# Overly fussy. 
+# shellcheck disable=SC2002 + +scratch=$(mktemp -d -t tmp.XXXXXXXXXX) +finish() { + rm -rf "$scratch" +} +trap finish EXIT + +DATE=$(date +%Y-%m-%d) +DETERMINATE_NIX_VERSION=$(cat .version-determinate) +TAG_NAME="v${DETERMINATE_NIX_VERSION}" +NIX_VERSION=$(cat .version) +NIX_VERSION_MAJOR_MINOR=$(echo "$NIX_VERSION" | cut -d. -f1,2) +GITHUB_REPOSITORY="${GITHUB_REPOSITORY:-DeterminateSystems/nix-src}" + +gh api "/repos/${GITHUB_REPOSITORY}/releases/generate-notes" \ + -f "tag_name=${TAG_NAME}" > "$scratch/notes.json" + +trim_trailing_newlines() { + local text + text="$(cat)" + echo -n "${text}" +} + +linkify_gh() { + sed \ + -e 's!\(https://github.com/DeterminateSystems/nix-src/\(pull\|issue\)/\([[:digit:]]\+\)\)![DeterminateSystems/nix-src#\3](\1)!' \ + -e 's#\(https://github.com/DeterminateSystems/nix-src/compare/\([^ ]\+\)\)#[\2](\1)#' +} + +( + cat doc/manual/source/release-notes-determinate/changes.md \ + | sed 's/^.*\(\)$/This section lists the differences between upstream Nix '"$NIX_VERSION_MAJOR_MINOR"' and Determinate Nix '"$DETERMINATE_NIX_VERSION"'.\1/' \ + + printf "\n\n" "$DETERMINATE_NIX_VERSION" + cat "$scratch/notes.json" \ + | jq -r .body \ + | grep -v '^#' \ + | grep -v "Full Changelog" \ + | trim_trailing_newlines \ + | sed -e 's/^\* /\n* /' \ + | linkify_gh + echo "" # final newline +) > "$scratch/changes.md" + +( + printf "# Release %s (%s)\n\n" \ + "$DETERMINATE_NIX_VERSION" \ + "$DATE" + printf "* Based on [upstream Nix %s](../release-notes/rl-%s.md).\n\n" \ + "$NIX_VERSION" \ + "$NIX_VERSION_MAJOR_MINOR" + + cat "$scratch/notes.json" | jq -r .body | linkify_gh +) > "$scratch/rl.md" + +( + cat doc/manual/source/SUMMARY.md.in \ + | sed 's/\(\)$/\1\n - [Release '"$DETERMINATE_NIX_VERSION"' ('"$DATE"')](release-notes-determinate\/'"$TAG_NAME"'.md)/' +) > "$scratch/summary.md" + +mv "$scratch/changes.md" doc/manual/source/release-notes-determinate/changes.md +mv "$scratch/rl.md" 
"doc/manual/source/release-notes-determinate/v${DETERMINATE_NIX_VERSION}.md" +mv "$scratch/summary.md" doc/manual/source/SUMMARY.md.in diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml deleted file mode 100644 index 7bb3f1c392d5..000000000000 --- a/.github/workflows/backport.yml +++ /dev/null @@ -1,37 +0,0 @@ -name: Backport -on: - pull_request_target: - types: [closed, labeled] -permissions: - contents: read -jobs: - backport: - name: Backport Pull Request - permissions: - # for korthout/backport-action - contents: write - pull-requests: write - if: github.repository_owner == 'NixOS' && github.event.pull_request.merged == true && (github.event_name != 'labeled' || startsWith('backport', github.event.label.name)) - runs-on: ubuntu-24.04-arm - steps: - - name: Generate GitHub App token - id: generate-token - uses: actions/create-github-app-token@v2 - with: - app-id: ${{ vars.CI_APP_ID }} - private-key: ${{ secrets.CI_APP_PRIVATE_KEY }} - - uses: actions/checkout@v6 - with: - ref: ${{ github.event.pull_request.head.sha }} - # required to find all branches - fetch-depth: 0 - - name: Create backport PRs - uses: korthout/backport-action@01619ebc9a6e3f6820274221b9956b3e7365000a # v4.1.0 - id: backport - with: - # Config README: https://github.com/korthout/backport-action#backport-action - github_token: ${{ steps.generate-token.outputs.token }} - github_workspace: ${{ github.workspace }} - auto_merge_enabled: true - pull_description: |- - Automatic backport to `${target_branch}`, triggered by a label in #${pull_number}. 
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml new file mode 100644 index 000000000000..c10c3ee00892 --- /dev/null +++ b/.github/workflows/build.yml @@ -0,0 +1,277 @@ +on: + workflow_call: + inputs: + system: + required: true + type: string + runner: + required: true + type: string + runner_for_virt: + required: true + type: string + runner_small: + required: true + type: string + if: + required: false + default: true + type: boolean + run_tests: + required: false + default: true + type: boolean + run_vm_tests: + required: false + default: false + type: boolean + run_regression_tests: + required: false + default: false + type: boolean + publish_manual: + required: false + default: false + type: boolean + secrets: + manual_netlify_auth_token: + required: false + manual_netlify_site_id: + required: false + sentry_auth_token: + required: false + sentry_org: + required: false + sentry_project: + required: false + +jobs: + build: + if: ${{ inputs.if }} + strategy: + fail-fast: false + runs-on: ${{ inputs.runner }} + timeout-minutes: 60 + steps: + - uses: actions/checkout@v4 + - uses: DeterminateSystems/determinate-nix-action@main + - uses: DeterminateSystems/flakehub-cache-action@main + - run: nix build .#packages.${{ inputs.system }}.default .#packages.${{ inputs.system }}.binaryTarball --no-link -L + - run: nix build .#packages.${{ inputs.system }}.binaryTarball --out-link tarball + - run: nix build .#^debug,out + - name: Upload debug info to Sentry + run: ./maintainers/upload-debug-info-to-sentry.py --debug-dir ./result-debug ./result/bin/nix + if: env.SENTRY_AUTH_TOKEN != '' + env: + SENTRY_AUTH_TOKEN: ${{ secrets.sentry_auth_token }} + SENTRY_ORG: ${{ secrets.sentry_org }} + SENTRY_PROJECT: ${{ secrets.sentry_project }} + - uses: actions/upload-artifact@v6 + with: + name: ${{ inputs.system }} + path: ./tarball/*.xz + + build_static: + if: ${{ inputs.if }} + strategy: + fail-fast: false + runs-on: ${{ inputs.runner }} + timeout-minutes: 60 + 
steps: + - uses: actions/checkout@v4 + - uses: DeterminateSystems/determinate-nix-action@main + - uses: DeterminateSystems/flakehub-cache-action@main + - run: nix build .#packages.${{ inputs.system }}.nix-cli-static --no-link -L + + test: + if: ${{ inputs.if && inputs.run_tests}} + needs: build + strategy: + fail-fast: false + runs-on: ${{ inputs.runner }} + timeout-minutes: 60 + steps: + - uses: actions/checkout@v4 + - uses: DeterminateSystems/determinate-nix-action@main + - uses: DeterminateSystems/flakehub-cache-action@main + - run: nix flake check -L --system ${{ inputs.system }} + + vm_tests_smoke: + if: inputs.run_vm_tests && github.event_name != 'merge_group' + needs: build + runs-on: ${{ inputs.runner_for_virt }} + steps: + - uses: actions/checkout@v4 + - uses: DeterminateSystems/determinate-nix-action@main + - uses: DeterminateSystems/flakehub-cache-action@main + - run: | + nix build -L \ + .#hydraJobs.tests.functional_user \ + .#hydraJobs.tests.githubFlakes \ + .#hydraJobs.tests.nix-docker \ + .#hydraJobs.tests.tarballFlakes \ + ; + + vm_tests_all: + if: inputs.run_vm_tests && github.event_name == 'merge_group' + needs: build + runs-on: ${{ inputs.runner_for_virt }} + steps: + - uses: actions/checkout@v4 + - uses: DeterminateSystems/determinate-nix-action@main + - uses: DeterminateSystems/flakehub-cache-action@main + - run: | + cmd() { + nix build -L --keep-going --timeout 600 \ + $(nix flake show --json \ + | jq -r ' + .hydraJobs.tests + | with_entries(select(.value.type == "derivation")) + | keys[] + | ".#hydraJobs.tests." + .') + } + + if ! cmd; then + echo "failed, retrying once ..." 
+ printf "\n\n\n\n\n\n\n\n" + cmd + fi + + flake_regressions: + if: | + (inputs.run_regression_tests && github.event_name == 'merge_group') + || ( + inputs.run_regression_tests + && github.event.pull_request.head.repo.full_name == 'DeterminateSystems/nix-src' + && ( + (github.event.action == 'labeled' && github.event.label.name == 'flake-regression-test') + || (github.event.action != 'labeled' && contains(github.event.pull_request.labels.*.name, 'flake-regression-test')) + ) + ) + needs: build + runs-on: ${{ inputs.runner }} + strategy: + matrix: + nix_config: + - "lazy-trees = true" + - "lazy-trees = false" + - "eval-cores = 24" + glob: + - "[0-9]*" + - "[a-b]*" + - "[c]*" + - "[d]*" + - "[e]*" + - "[f]*" + - "[g-h]*" + - "[i-k]*" + - "[l]*" + - "[m]*" + - "[n]*" + - "[o-s]*" + - "[t]*" + - "[u-z]*" + + steps: + - name: Checkout nix + uses: actions/checkout@v4 + - name: Checkout flake-regressions + uses: actions/checkout@v4 + with: + repository: NixOS/flake-regressions + path: flake-regressions + - name: Checkout flake-regressions-data + uses: actions/checkout@v4 + with: + repository: NixOS/flake-regressions-data + path: flake-regressions/tests + - uses: DeterminateSystems/determinate-nix-action@main + - uses: DeterminateSystems/flakehub-cache-action@main + - name: Run flake regression tests + env: + #PARALLEL: ${{ !contains(matrix.nix_config, 'eval-cores') && '-P 50%' || '-P 1' }} + PARALLEL: '-P 1' + FLAKE_REGRESSION_GLOB: ${{ matrix.glob }} + NIX_CONFIG: ${{ matrix.nix_config }} + PREFETCH: "1" + USE_NIX_FLAKE_SHOW: "1" + run: | + set -x + echo "PARALLEL: $PARALLEL" + echo "NIX_CONFIG: $NIX_CONFIG" + if [ ! -z "${NSC_CACHE_PATH:-}" ]; then + mkdir -p "${NSC_CACHE_PATH}/nix/xdg-cache" + export XDG_CACHE_HOME="${NSC_CACHE_PATH}/nix/xdg-cache" + fi + nix build -L --out-link ./new-nix + export PATH=$(pwd)/new-nix/bin:$PATH + [[ $(type -p nix) = $(pwd)/new-nix/bin/nix ]] + + nix config show lazy-trees + nix config show eval-cores + lscpu + nproc + + if ! 
flake-regressions/eval-all.sh; then + echo "Some failed, trying again" + printf "\n\n\n\n\n\n\n\n" + NIX_REMOTE=/tmp/nix flake-regressions/eval-all.sh + fi + + manual: + if: github.event_name != 'merge_group' + needs: build + runs-on: ${{ inputs.runner_small }} + permissions: + id-token: "write" + contents: "read" + pull-requests: "write" + statuses: "write" + deployments: "write" + steps: + - name: Checkout nix + uses: actions/checkout@v4 + - uses: DeterminateSystems/determinate-nix-action@main + - uses: DeterminateSystems/flakehub-cache-action@main + - name: Build manual + if: inputs.system == 'x86_64-linux' + run: nix build .#hydraJobs.manual + - uses: nwtgck/actions-netlify@v3.0 + if: inputs.publish_manual && inputs.system == 'x86_64-linux' + with: + publish-dir: "./result/share/doc/nix/manual" + production-branch: main + github-token: ${{ secrets.GITHUB_TOKEN }} + deploy-message: "Deploy from GitHub Actions" + # NOTE(cole-h): We have a perpetual PR displaying our changes against upstream open, but + # its conversation is locked, so this PR comment can never be posted. 
+ # https://github.com/DeterminateSystems/nix-src/pull/165 + enable-pull-request-comment: ${{ github.event.pull_request.number != 165 }} + enable-commit-comment: true + enable-commit-status: true + overwrites-pull-request-comment: true + env: + NETLIFY_AUTH_TOKEN: ${{ secrets.manual_netlify_auth_token }} + NETLIFY_SITE_ID: ${{ secrets.manual_netlify_site_id }} + + success: + needs: + - build + - test + - vm_tests_smoke + - vm_tests_all + - flake_regressions + - manual + if: ${{ always() }} + runs-on: ubuntu-latest + steps: + - run: "true" + - run: | + echo "A dependent in the build matrix failed:" + echo "$needs" + exit 1 + env: + needs: ${{ toJSON(needs) }} + if: | + contains(needs.*.result, 'failure') || + contains(needs.*.result, 'cancelled') diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index e37193966a25..132f559ce16a 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -2,269 +2,169 @@ name: "CI" on: pull_request: - merge_group: push: branches: + # NOTE: make sure any branches here are also valid directory names, + # otherwise creating the directory and uploading to s3 will fail + - main - master - workflow_dispatch: - inputs: - dogfood: - description: 'Use dogfood Nix build' - required: false - default: true - type: boolean - -concurrency: - group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} - cancel-in-progress: true + merge_group: + release: + types: + - published -permissions: read-all +permissions: + id-token: "write" + contents: "read" + pull-requests: "write" + statuses: "write" + deployments: "write" jobs: eval: - runs-on: ubuntu-24.04 + runs-on: UbuntuLatest32Cores128G steps: - - uses: actions/checkout@v6 - with: - fetch-depth: 0 - - uses: ./.github/actions/install-nix-action - with: - dogfood: ${{ github.event_name == 'workflow_dispatch' && inputs.dogfood || github.event_name != 'workflow_dispatch' }} - extra_nix_config: - experimental-features = nix-command flakes - 
github_token: ${{ secrets.GITHUB_TOKEN }} - use_cache: false - - run: nix flake show --all-systems --json - - pre-commit-checks: - name: pre-commit checks - runs-on: ubuntu-24.04 - steps: - - uses: actions/checkout@v6 - - uses: ./.github/actions/install-nix-action + - uses: actions/checkout@v4 with: - dogfood: ${{ github.event_name == 'workflow_dispatch' && inputs.dogfood || github.event_name != 'workflow_dispatch' }} - extra_nix_config: experimental-features = nix-command flakes - github_token: ${{ secrets.GITHUB_TOKEN }} - - run: ./ci/gha/tests/pre-commit-checks + fetch-depth: 0 + - uses: DeterminateSystems/determinate-nix-action@main + - run: nix flake show --all-systems --json + + build_x86_64-linux: + uses: ./.github/workflows/build.yml + with: + system: x86_64-linux + runner: namespace-profile-linuxamd32c64g-cache + runner_for_virt: UbuntuLatest32Cores128G + runner_small: ubuntu-latest + run_tests: true + run_vm_tests: true + run_regression_tests: true + publish_manual: true + secrets: + manual_netlify_auth_token: ${{ secrets.NETLIFY_AUTH_TOKEN }} + manual_netlify_site_id: ${{ secrets.NETLIFY_SITE_ID }} + sentry_auth_token: ${{ secrets.SENTRY_AUTH_TOKEN }} + sentry_org: ${{ secrets.SENTRY_ORG }} + sentry_project: ${{ secrets.SENTRY_PROJECT }} - basic-checks: - name: aggregate basic checks + build_aarch64-linux: + uses: ./.github/workflows/build.yml + with: + if: ${{ + github.event_name != 'pull_request' + || ( + github.event.pull_request.head.repo.full_name == 'DeterminateSystems/nix-src' + && ( + (github.event.action == 'labeled' && github.event.label.name == 'upload to s3') + || (github.event.action != 'labeled' && contains(github.event.pull_request.labels.*.name, 'upload to s3')) + ) + ) + }} + system: aarch64-linux + runner: UbuntuLatest32Cores128GArm + runner_for_virt: UbuntuLatest32Cores128GArm + runner_small: UbuntuLatest32Cores128GArm + secrets: + sentry_auth_token: ${{ secrets.SENTRY_AUTH_TOKEN }} + sentry_org: ${{ secrets.SENTRY_ORG }} + 
sentry_project: ${{ secrets.SENTRY_PROJECT }} + + build_aarch64-darwin: + uses: ./.github/workflows/build.yml + with: + system: aarch64-darwin + runner: namespace-profile-mac-m2-12c28g + runner_for_virt: namespace-profile-mac-m2-12c28g + runner_small: macos-latest-xlarge + secrets: + sentry_auth_token: ${{ secrets.SENTRY_AUTH_TOKEN }} + sentry_org: ${{ secrets.SENTRY_ORG }} + sentry_project: ${{ secrets.SENTRY_PROJECT }} + + success: + runs-on: ubuntu-latest + needs: + - eval + - build_x86_64-linux + - build_aarch64-linux + - build_aarch64-darwin if: ${{ always() }} - runs-on: ubuntu-24.04 - needs: [pre-commit-checks, eval] steps: - - name: Exit with any errors - if: ${{ contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled') }} - run: | + - run: "true" + - run: | + echo "A dependent in the build matrix failed:" + echo "$needs" exit 1 + env: + needs: ${{ toJSON(needs) }} + if: | + contains(needs.*.result, 'failure') || + contains(needs.*.result, 'cancelled') - tests: - needs: basic-checks - strategy: - fail-fast: false - matrix: - include: - - scenario: on ubuntu - runs-on: ubuntu-24.04 - os: linux - instrumented: false - primary: true - stdenv: stdenv - - scenario: on macos - runs-on: macos-14 - os: darwin - instrumented: false - primary: true - stdenv: stdenv - - scenario: on ubuntu (with sanitizers / coverage) - runs-on: ubuntu-24.04 - os: linux - instrumented: true - primary: false - stdenv: clangStdenv - name: tests ${{ matrix.scenario }} - runs-on: ${{ matrix.runs-on }} - timeout-minutes: 60 - steps: - - uses: actions/checkout@v6 - with: - fetch-depth: 0 - - uses: ./.github/actions/install-nix-action - with: - github_token: ${{ secrets.GITHUB_TOKEN }} - dogfood: ${{ github.event_name == 'workflow_dispatch' && inputs.dogfood || github.event_name != 'workflow_dispatch' }} - # The sandbox would otherwise be disabled by default on Darwin - extra_nix_config: "sandbox = true" - # Since ubuntu 22.30, unprivileged usernamespaces are no longer 
allowed to map to the root user: - # https://ubuntu.com/blog/ubuntu-23-10-restricted-unprivileged-user-namespaces - - run: sudo sysctl -w kernel.apparmor_restrict_unprivileged_userns=0 - if: matrix.os == 'linux' - - name: Run component tests - run: | - nix build --file ci/gha/tests/wrapper.nix componentTests -L \ - --arg withInstrumentation ${{ matrix.instrumented }} \ - --argstr stdenv "${{ matrix.stdenv }}" - - name: Run VM tests - run: | - nix build --file ci/gha/tests/wrapper.nix vmTests -L \ - --arg withInstrumentation ${{ matrix.instrumented }} \ - --argstr stdenv "${{ matrix.stdenv }}" - if: ${{ matrix.os == 'linux' }} - - name: Run flake checks and prepare the installer tarball - run: | - ci/gha/tests/build-checks - ci/gha/tests/prepare-installer-for-github-actions - if: ${{ matrix.primary }} - - name: Collect code coverage - run: | - nix build --file ci/gha/tests/wrapper.nix codeCoverage.coverageReports -L \ - --arg withInstrumentation ${{ matrix.instrumented }} \ - --argstr stdenv "${{ matrix.stdenv }}" \ - --out-link coverage-reports - cat coverage-reports/index.txt >> $GITHUB_STEP_SUMMARY - if: ${{ matrix.instrumented }} - - name: Upload coverage reports - uses: actions/upload-artifact@v6 - with: - name: coverage-reports - path: coverage-reports/ - if: ${{ matrix.instrumented }} - - name: Upload installer tarball - uses: actions/upload-artifact@v6 - with: - name: installer-${{matrix.os}} - path: out/* - if: ${{ matrix.primary }} + - uses: actions/checkout@v4 + - uses: DeterminateSystems/determinate-nix-action@main - windows_tests: - needs: basic-checks - name: windows unit tests - runs-on: ubuntu-24.04 - continue-on-error: true - timeout-minutes: 60 - steps: - - uses: actions/checkout@v6 - with: - fetch-depth: 0 - - uses: ./.github/actions/install-nix-action - with: - github_token: ${{ secrets.GITHUB_TOKEN }} - dogfood: ${{ github.event_name == 'workflow_dispatch' && inputs.dogfood || github.event_name != 'workflow_dispatch' }} - - name: Run Windows 
unit tests - run: | - nix build --file ci/gha/tests/windows.nix unitTests.nix-util-tests -L + - name: Create artifacts directory + run: mkdir -p ./artifacts - installer_test: - needs: [tests] - strategy: - fail-fast: false - matrix: - include: - - scenario: on ubuntu - runs-on: ubuntu-24.04 - os: linux - experimental-installer: false - - scenario: on macos - runs-on: macos-14 - os: darwin - experimental-installer: false - - scenario: on ubuntu (experimental) - runs-on: ubuntu-24.04 - os: linux - experimental-installer: true - - scenario: on macos (experimental) - runs-on: macos-14 - os: darwin - experimental-installer: true - name: installer test ${{ matrix.scenario }} - runs-on: ${{ matrix.runs-on }} - steps: - - uses: actions/checkout@v6 - - name: Download installer tarball - uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 - with: - name: installer-${{matrix.os}} - path: out - - name: Looking up the installer tarball URL - id: installer-tarball-url - run: | - echo "installer-url=file://$GITHUB_WORKSPACE/out" >> "$GITHUB_OUTPUT" - TARBALL_PATH="$(find "$GITHUB_WORKSPACE/out" -name 'nix*.tar.xz' -print | head -n 1)" - echo "tarball-path=file://$TARBALL_PATH" >> "$GITHUB_OUTPUT" - - uses: cachix/install-nix-action@2126ae7fc54c9df00dd18f7f18754393182c73cd # v31.9.1 - if: ${{ !matrix.experimental-installer }} - with: - install_url: ${{ format('{0}/install', steps.installer-tarball-url.outputs.installer-url) }} - install_options: ${{ format('--tarball-url-prefix {0}', steps.installer-tarball-url.outputs.installer-url) }} - - uses: ./.github/actions/install-nix-action - if: ${{ matrix.experimental-installer }} - with: - dogfood: false - experimental-installer: true - tarball_url: ${{ steps.installer-tarball-url.outputs.tarball-path }} - github_token: ${{ secrets.GITHUB_TOKEN }} - - run: sudo apt install fish zsh - if: matrix.os == 'linux' - - run: brew install fish - if: matrix.os == 'darwin' - - run: exec bash -c "nix-instantiate -E 
'builtins.currentTime' --eval" - - run: exec sh -c "nix-instantiate -E 'builtins.currentTime' --eval" - - run: exec zsh -c "nix-instantiate -E 'builtins.currentTime' --eval" - - run: exec fish -c "nix-instantiate -E 'builtins.currentTime' --eval" - - run: exec bash -c "nix-channel --add https://releases.nixos.org/nixos/unstable/nixos-23.05pre466020.60c1d71f2ba nixpkgs" - - run: exec bash -c "nix-channel --update && nix-env -iA nixpkgs.hello && hello" - - flake_regressions: - needs: tests - runs-on: ubuntu-24.04 - steps: - - name: Checkout nix - uses: actions/checkout@v6 - - name: Checkout flake-regressions - uses: actions/checkout@v6 - with: - repository: NixOS/flake-regressions - path: flake-regressions - - name: Checkout flake-regressions-data - uses: actions/checkout@v6 - with: - repository: NixOS/flake-regressions-data - path: flake-regressions/tests - - name: Download installer tarball - uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 + - name: Fetch artifacts + uses: actions/download-artifact@v7 with: - name: installer-linux - path: out - - name: Looking up the installer tarball URL - id: installer-tarball-url + path: downloaded + - name: Move downloaded artifacts to artifacts directory run: | - echo "installer-url=file://$GITHUB_WORKSPACE/out" >> "$GITHUB_OUTPUT" - - uses: cachix/install-nix-action@2126ae7fc54c9df00dd18f7f18754393182c73cd # v31.9.1 + for dir in ./downloaded/*; do + arch="$(basename "$dir")" + mv "$dir"/*.xz ./artifacts/"${arch}" + done + + - name: Build fallback-paths.nix + if: ${{ + github.event_name != 'pull_request' + || ( + github.event.pull_request.head.repo.full_name == 'DeterminateSystems/nix-src' + && ( + (github.event.action == 'labeled' && github.event.label.name == 'upload to s3') + || (github.event.action != 'labeled' && contains(github.event.pull_request.labels.*.name, 'upload to s3')) + ) + ) + }} + run: | + nix build .#fallbackPathsNix --out-link fallback + cat fallback > 
./artifacts/fallback-paths.nix + + - uses: DeterminateSystems/push-artifact-ids@main with: - install_url: ${{ format('{0}/install', steps.installer-tarball-url.outputs.installer-url) }} - install_options: ${{ format('--tarball-url-prefix {0}', steps.installer-tarball-url.outputs.installer-url) }} - - name: Run flake regressions tests - run: MAX_FLAKES=25 flake-regressions/eval-all.sh + s3_upload_role: ${{ secrets.AWS_S3_UPLOAD_ROLE_ARN }} + bucket: ${{ secrets.AWS_S3_UPLOAD_BUCKET_NAME }} + directory: ./artifacts + ids_project_name: determinate-nix + ids_binary_prefix: determinate-nix + skip_acl: true + allowed_branches: '["main"]' - profile_build: - needs: tests - runs-on: ubuntu-24.04 - timeout-minutes: 60 - if: >- - github.event_name == 'push' && - github.ref_name == 'master' + publish: + needs: + - success + if: (!github.repository.fork && (github.ref == format('refs/heads/{0}', github.event.repository.default_branch) || startsWith(github.ref, 'refs/tags/'))) + environment: ${{ github.event_name == 'release' && 'production' || '' }} + runs-on: ubuntu-latest + permissions: + contents: write + id-token: write steps: - - uses: actions/checkout@v6 - with: - fetch-depth: 0 - - uses: ./.github/actions/install-nix-action - with: - github_token: ${{ secrets.GITHUB_TOKEN }} - dogfood: ${{ github.event_name == 'workflow_dispatch' && inputs.dogfood || github.event_name != 'workflow_dispatch' }} - extra_nix_config: | - experimental-features = flakes nix-command ca-derivations impure-derivations - max-jobs = 1 - - run: | - nix build -L --file ./ci/gha/profile-build buildTimeReport --out-link build-time-report.md - cat build-time-report.md >> $GITHUB_STEP_SUMMARY + - uses: actions/checkout@v4 + - uses: DeterminateSystems/determinate-nix-action@main + - uses: DeterminateSystems/flakehub-push@main + with: + rolling: ${{ github.ref == format('refs/heads/{0}', github.event.repository.default_branch) }} + visibility: "public" + tag: "${{ github.ref_name }}" + - name: Update the 
release notes + if: startsWith(github.ref, 'refs/tags/') + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + TAG_NAME: ${{ github.ref_name }} + run: | + gh release edit "$TAG_NAME" --notes-file doc/manual/source/release-notes-determinate/"$TAG_NAME".md || true diff --git a/.github/workflows/propose-release.yml b/.github/workflows/propose-release.yml new file mode 100644 index 000000000000..ea01e4b7afec --- /dev/null +++ b/.github/workflows/propose-release.yml @@ -0,0 +1,32 @@ +on: + workflow_dispatch: + inputs: + reference-id: + type: string + required: true + version: + type: string + required: true + +concurrency: + group: ${{ github.workflow }} + cancel-in-progress: true + +jobs: + propose-release: + uses: DeterminateSystems/propose-release/.github/workflows/workflow.yml@main + permissions: + id-token: write + contents: write + pull-requests: write + with: + update-flake: false + reference-id: ${{ inputs.reference-id }} + version: ${{ inputs.version }} + extra-commands-early: | + echo ${{ inputs.version }} > .version-determinate + git add .version-determinate + git commit -m "Set .version-determinate to ${{ inputs.version }}" || true + ./.github/release-notes.sh + git add doc + git commit -m "Generate release notes for ${{ inputs.version }}" || true diff --git a/.github/workflows/upload-release.yml b/.github/workflows/upload-release.yml deleted file mode 100644 index 5de9ec7951ce..000000000000 --- a/.github/workflows/upload-release.yml +++ /dev/null @@ -1,80 +0,0 @@ -name: Upload Release -on: - workflow_dispatch: - inputs: - eval_id: - description: "Hydra evaluation ID" - required: true - type: number - is_latest: - description: "Mark as latest release" - required: false - type: boolean - default: false -permissions: - contents: read - id-token: write - packages: write -jobs: - release: - runs-on: ubuntu-24.04 - environment: releases - steps: - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - - uses: 
./.github/actions/install-nix-action - with: - dogfood: false # Use stable version - use_cache: false # Don't want any cache injection shenanigans - extra_nix_config: | - experimental-features = nix-command flakes - - name: Set NIX_PATH from flake input - run: | - NIXPKGS_PATH=$(nix build --inputs-from .# nixpkgs#path --print-out-paths --no-link) - # Shebangs with perl have issues. Pin nixpkgs this way. nix shell should maybe - # get the same uberhack that nix-shell has to support it. - echo "NIX_PATH=nixpkgs=$NIXPKGS_PATH" >> "$GITHUB_ENV" - - name: Configure AWS credentials - uses: aws-actions/configure-aws-credentials@61815dcd50bd041e203e49132bacad1fd04d2708 # v5.1.1 - with: - role-to-assume: "arn:aws:iam::080433136561:role/nix-release" - role-session-name: nix-release-oidc-${{ github.run_id }} - aws-region: eu-west-1 - - name: Disable containerd image store - run: | - # Docker 28+ defaults to the containerd image store, which - # pushes layers uncompressed instead of gzip. OCI clients - # that only support gzip (e.g. go-containerregistry) fail - # with "gzip: invalid header". Disabling the containerd - # snapshotter restores the classic storage driver, which - # preserves gzip-compressed layers through the - # `docker load` / `docker push` pipeline. 
- echo '{"features":{"containerd-snapshotter":false}}' | sudo tee /etc/docker/daemon.json > /dev/null - sudo systemctl restart docker - - name: Login to Docker Hub - uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - name: Login to GitHub Container Registry - uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0 - with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - name: Upload release - run: | - ./maintainers/upload-release.pl \ - ${{ inputs.eval_id }} \ - --skip-git - env: - IS_LATEST: ${{ inputs.is_latest && '1' || '' }} - - name: Push to GHCR - run: | - DOCKER_OWNER="ghcr.io/$(echo '${{ github.repository_owner }}' | tr '[A-Z]' '[a-z]')/nix" - ./maintainers/upload-release.pl \ - ${{ inputs.eval_id }} \ - --skip-git \ - --skip-s3 \ - --docker-owner "$DOCKER_OWNER" - env: - IS_LATEST: ${{ inputs.is_latest && '1' || '' }} diff --git a/.version-determinate b/.version-determinate new file mode 100644 index 000000000000..d21858b119b1 --- /dev/null +++ b/.version-determinate @@ -0,0 +1 @@ +3.18.1 diff --git a/README.md b/README.md index 02498944cdb7..c5cbcbed21bb 100644 --- a/README.md +++ b/README.md @@ -1,38 +1,111 @@ -# Nix +

+ +

+

+  Discord  +  Bluesky  +  Mastodon  +  Twitter  +  LinkedIn  +

-[![Open Collective supporters](https://opencollective.com/nixos/tiers/supporter/badge.svg?label=Supporters&color=brightgreen)](https://opencollective.com/nixos) -[![CI](https://github.com/NixOS/nix/workflows/CI/badge.svg)](https://github.com/NixOS/nix/actions/workflows/ci.yml) +# The Determinate Nix CLI -Nix is a powerful package manager for Linux and other Unix systems that makes package -management reliable and reproducible. Please refer to the [Nix manual](https://nix.dev/reference/nix-manual) -for more details. +[![CI](https://github.com/DeterminateSystems/nix-src/workflows/CI/badge.svg)](https://github.com/DeterminateSystems/nix-src/actions/workflows/ci.yml) -## Installation and first steps +**Nix** is a powerful [language], [package manager][package-management], and [build tool][cli] for [macOS](#macos), [Linux](#linux), and other Unix systems. +It enables you to create fully reproducible [development environments][envs], to build [packages] in sandboxed environments, to build entire Linux systems using [NixOS], and much more. -Visit [nix.dev](https://nix.dev) for [installation instructions](https://nix.dev/tutorials/install-nix) and [beginner tutorials](https://nix.dev/tutorials/first-steps). +[**Determinate Nix**][det-nix] is a downstream distribution of [Nix][upstream] created and maintained by [Determinate Systems][detsys]. +It has two components: -Full reference documentation can be found in the [Nix manual](https://nix.dev/reference/nix-manual). +- The Determinate Nix CLI, a distribution of the Nix CLI built from this repository. + It's based on the [upstream Nix CLI][upstream] and continuously rebased against it, but adds a wide variety of [features] and [improvements][changelog]. +- [Determinate Nixd][dnixd] is a useful daemon for Linux and macOS that handles vital tasks like configuration and enterprise certificate management. 
-## Building and developing +Determinate Nix is built on SOC-2-Type-II-compliant infrastructure using [Determinate Secure Packages][secure-packages], released via a carefully orchestrated process, and, for Determinate Systems customers, backed by formal security response SLAs that meet stringent compliance standards. -Follow instructions in the Nix reference manual to [set up a development environment and build Nix from source](https://nix.dev/manual/nix/development/development/building.html). +> [!NOTE] +> Determinate Nix, by definition, consists of _both_ the components listed above. +> While it's possible to use the code in this repository to run just our downstream Nix CLI, we do _not_ officially support this experience and provide none of the guarantees or SLAs that we provide for Determinate Nix proper. -## Contributing +Determinate Nix is part of the [Determinate platform][determinate], which also includes [FlakeHub], a secure flake repository with features like [FlakeHub Cache][cache], [private flakes][private-flakes], and [semantic versioning][semver] (SemVer) for [flakes]. + +## Installing Determinate Nix + +You can install Determinate Nix on [macOS](#macos), non-NixOS [Linux](#linux) and WSL, and [NixOS](#nixos). + +### macOS + +On macOS, we recommend using the graphical installer from Determinate Systems. +Click [here][gui] to download and run it. + +### Linux + +On Linux, including Windows Subsystem for Linux (WSL), we recommend installing Determinate Nix using [Determinate Nix Installer][installer]: + +```shell +curl -fsSL https://install.determinate.systems/nix | sh -s -- install +``` -Check the [contributing guide](./CONTRIBUTING.md) if you want to get involved with developing Nix. +### NixOS -## Additional resources +On [NixOS], we recommend following our [dedicated installation guide][nixos-install]. +We also provide both [Amazon Machine Images][amis] (AMIs) and [ISOs] for using Determinate on NixOS. 
-Nix was created by Eelco Dolstra and developed as the subject of his PhD thesis [The Purely Functional Software Deployment Model](https://edolstra.github.io/pubs/phd-thesis.pdf), published 2006. -Today, a world-wide developer community contributes to Nix and the ecosystem that has grown around it. +## Other resources -- [The Nix, Nixpkgs, NixOS Community on nixos.org](https://nixos.org/) -- [Official documentation on nix.dev](https://nix.dev) -- [Nixpkgs](https://github.com/NixOS/nixpkgs) is [the largest, most up-to-date free software repository in the world](https://repology.org/repositories/graphs) -- [NixOS](https://github.com/NixOS/nixpkgs/tree/master/nixos) is a Linux distribution that can be configured fully declaratively -- [Discourse](https://discourse.nixos.org/) -- Matrix: [#users:nixos.org](https://matrix.to/#/#users:nixos.org) for user support and [#nix-dev:nixos.org](https://matrix.to/#/#nix-dev:nixos.org) for development +Nix was created by [Eelco Dolstra][eelco] and developed as the subject of his 2006 PhD thesis, [The Purely Functional Software Deployment Model][thesis]. +Today, a worldwide developer community contributes to Nix and the ecosystem that has grown around it. + +- [Zero to Nix][z2n], Determinate Systems' guide to Nix and [flakes] for beginners +- [Nixpkgs], a collection of well over 100,000 software packages that you can build and manage using Nix +- [NixOS] is a Linux distribution that can be configured fully declaratively +- The Nix, Nixpkgs, and NixOS community on [nixos.org][website] + +## Reference + +The primary documentation for Determinate and Determinate Nix is available at [docs.determinate.systems][determinate]. +For deeply technical reference material, see the [Determinate Nix manual][manual] which is based on the upstream Nix manual. ## License -Nix is released under the [LGPL v2.1](./COPYING). +[Upstream Nix][upstream] is released under the [LGPL v2.1][license] license. 
+[Determinate Nix][det-nix] is also released under LGPL v2.1 in accordance with the terms of the upstream license. + +## Contributing + +Check the [contributing guide][contributing] if you want to get involved with developing Nix. + +[amis]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AMIs.html +[cache]: https://docs.determinate.systems/flakehub/cache +[changelog]: https://determinate.systems/blog/categories/changelog +[cli]: https://manual.determinate.systems/command-ref/new-cli/nix.html +[contributing]: ./CONTRIBUTING.md +[det-nix]: https://docs.determinate.systems/determinate-nix +[determinate]: https://docs.determinate.systems +[detsys]: https://determinate.systems +[dnixd]: https://docs.determinate.systems/determinate-nix#determinate-nixd +[eelco]: https://determinate.systems/people/eelco-dolstra +[envs]: https://zero-to-nix.com/concepts/dev-env +[features]: https://docs.determinate.systems/determinate-nix/#special-features +[flakehub]: https://flakehub.com +[flakes]: https://zero-to-nix.com/concepts/flakes +[gui]: https://install.determinate.systems/determinate-pkg/stable/Universal +[installer]: https://github.com/DeterminateSystems/nix-installer +[isos]: https://github.com/DeterminateSystems/nixos-iso +[language]: https://zero-to-nix.com/concepts/nix-language +[license]: ./COPYING +[manual]: https://manual.determinate.systems +[nixpkgs]: https://github.com/NixOS/nixpkgs +[nixos]: https://github.com/NixOS/nixpkgs/tree/master/nixos +[nixos-install]: https://docs.determinate.systems/guides/advanced-installation#nixos +[packages]: https://zero-to-nix.com/concepts/packages +[package-management]: https://zero-to-nix.com/concepts/package-management +[private-flakes]: https://docs.determinate.systems/flakehub/private-flakes +[secure-packages]: https://determinate.systems/secure-packages +[semver]: https://docs.determinate.systems/flakehub/concepts/semver +[thesis]: https://edolstra.github.io/pubs/phd-thesis.pdf +[upstream]: https://github.com/NixOS/nix 
+[website]: https://nixos.org +[z2n]: https://zero-to-nix.com diff --git a/ci/gha/tests/default.nix b/ci/gha/tests/default.nix index 6100f2f4172e..7d206750fcfe 100644 --- a/ci/gha/tests/default.nix +++ b/ci/gha/tests/default.nix @@ -76,13 +76,18 @@ rec { */ topLevel = { installerScriptForGHA = hydraJobs.installerScriptForGHA.${system}; - installTests = hydraJobs.installTests.${system}; nixpkgsLibTests = hydraJobs.tests.nixpkgsLibTests.${system}; + nixpkgsLibTestsLazy = hydraJobs.tests.nixpkgsLibTestsLazy.${system}; rl-next = pkgs.buildPackages.runCommand "test-rl-next-release-notes" { } '' LANG=C.UTF-8 ${pkgs.changelog-d}/bin/changelog-d ${../../../doc/manual/rl-next} >$out ''; repl-completion = pkgs.callPackage ../../../tests/repl-completion.nix { inherit (packages') nix; }; + lazyTrees = nixComponents.nix-functional-tests.override { + pname = "nix-lazy-trees-tests"; + lazyTrees = true; + }; + /** Checks for our packaging expressions. This shouldn't build anything significant; just check that things diff --git a/default.nix b/default.nix deleted file mode 100644 index 6466507b7140..000000000000 --- a/default.nix +++ /dev/null @@ -1,9 +0,0 @@ -(import ( - let - lock = builtins.fromJSON (builtins.readFile ./flake.lock); - in - fetchTarball { - url = "https://github.com/edolstra/flake-compat/archive/${lock.nodes.flake-compat.locked.rev}.tar.gz"; - sha256 = lock.nodes.flake-compat.locked.narHash; - } -) { src = ./.; }).defaultNix diff --git a/doc/manual/book.toml.in b/doc/manual/book.toml.in index c798afc4a8c0..11efca75f110 100644 --- a/doc/manual/book.toml.in +++ b/doc/manual/book.toml.in @@ -1,12 +1,12 @@ [book] -title = "Nix @version@ Reference Manual" +title = "Determinate Nix @version@ Reference Manual" src = "source" [output.html] additional-css = ["custom.css"] additional-js = ["redirects.js"] -edit-url-template = "https://github.com/NixOS/nix/tree/master/doc/manual/{path}" -git-repository-url = "https://github.com/NixOS/nix" +edit-url-template = 
"https://github.com/DeterminateSystems/nix-src/tree/master/doc/manual/{path}" +git-repository-url = "https://github.com/DeterminateSystems/nix-src" mathjax-support = true # Handles replacing @docroot@ with a path to ./source relative to that markdown file, diff --git a/doc/manual/custom.css b/doc/manual/custom.css index 7af150be391b..119c6d125430 100644 --- a/doc/manual/custom.css +++ b/doc/manual/custom.css @@ -1,5 +1,5 @@ :root { - --sidebar-width: 23em; + --sidebar-width: 23em; } h1.menu-title::before { @@ -7,11 +7,10 @@ h1.menu-title::before { background-image: url("./favicon.svg"); padding: 1.25em; background-position: center center; - background-size: 2em; + background-size: 1.5em; background-repeat: no-repeat; } - .menu-bar { padding: 0.5em 0em; } @@ -21,13 +20,13 @@ h1.menu-title::before { } h1:not(:first-of-type) { - margin-top: 1.3em; + margin-top: 1.3em; } h2 { - margin-top: 1em; + margin-top: 1em; } .hljs-meta { - user-select: none; + user-select: none; } diff --git a/doc/manual/generate-manpage.nix b/doc/manual/generate-manpage.nix index 31e74e17d264..7b2a02bd3f84 100644 --- a/doc/manual/generate-manpage.nix +++ b/doc/manual/generate-manpage.nix @@ -42,11 +42,6 @@ let let result = '' - > **Warning** \ - > This program is - > [**experimental**](@docroot@/development/experimental-features.md#xp-feature-nix-command) - > and its interface is subject to change. 
- # Name `${command}` - ${details.description} @@ -81,7 +76,11 @@ let subcommands = if length categories > 1 then listCategories else listSubcommands details.commands; categories = sort (x: y: x.id < y.id) ( - unique (map (cmd: cmd.category) (attrValues details.commands)) + unique ( + map (cmd: { inherit (cmd.category) id description; }) ( + builtins.filter (cmd: cmd.category.id != 103) (attrValues details.commands) + ) + ) ); listCategories = concatStrings (map showCategory categories); @@ -89,7 +88,7 @@ let showCategory = cat: '' **${toString cat.description}:** - ${listSubcommands (filterAttrs (n: v: v.category == cat) details.commands)} + ${listSubcommands (filterAttrs (n: v: v.category.id == cat.id) details.commands)} ''; listSubcommands = cmds: concatStrings (attrValues (mapAttrs showSubcommand cmds)); diff --git a/doc/manual/meson.build b/doc/manual/meson.build index 6fd841e80cbb..4f9a55b515bf 100644 --- a/doc/manual/meson.build +++ b/doc/manual/meson.build @@ -5,19 +5,9 @@ project( license : 'LGPL-2.1-or-later', ) -# Compute documentation URL based on version and release type -version = meson.project_version() -official_release = get_option('official-release') +fs = import('fs') -if official_release - # For official releases, use versioned URL (dropping patch version) - version_parts = version.split('.') - major_minor = '@0@.@1@'.format(version_parts[0], version_parts[1]) - doc_url = 'https://nix.dev/manual/nix/@0@'.format(major_minor) -else - # For development builds, use /latest - doc_url = 'https://nix.dev/manual/nix/latest' -endif +doc_url = 'https://manual.determinate.systems/' nix = find_program('nix', native : true) @@ -39,7 +29,7 @@ nix_env_for_docs = { 'NIX_CONFIG' : 'cores = 0', } -nix_for_docs = [ nix, '--experimental-features', 'nix-command' ] +nix_for_docs = [ nix ] nix_eval_for_docs_common = nix_for_docs + [ 'eval', '-I', @@ -141,7 +131,7 @@ if get_option('html-manual') python.full_path(), mdbook.full_path(), meson.current_build_dir(), - 
meson.project_version(), + fs.read('../../.version-determinate').strip(), ), ], input : [ diff --git a/doc/manual/package.nix b/doc/manual/package.nix index af5e6cf1c229..9e69156ac0ca 100644 --- a/doc/manual/package.nix +++ b/doc/manual/package.nix @@ -34,7 +34,7 @@ let in mkMesonDerivation (finalAttrs: { - pname = "nix-manual"; + pname = "determinate-nix-manual"; inherit version; workDir = ./.; @@ -42,6 +42,7 @@ mkMesonDerivation (finalAttrs: { fileset.difference (fileset.unions [ ../../.version + ../../.version-determinate # For example JSON ../../src/libutil-tests/data/memory-source-accessor ../../src/libutil-tests/data/hash diff --git a/doc/manual/redirects.json b/doc/manual/redirects.json index 0a6c71508006..07a6f36627ff 100644 --- a/doc/manual/redirects.json +++ b/doc/manual/redirects.json @@ -243,29 +243,11 @@ "gloss-validity": "glossary.html#gloss-validity", "part-glossary": "glossary.html", "sec-building-source": "installation/building-source.html", - "ch-env-variables": "installation/env-variables.html", - "sec-installer-proxy-settings": "installation/env-variables.html#proxy-environment-variables", - "sec-nix-ssl-cert-file": "installation/env-variables.html#nix_ssl_cert_file", - "sec-nix-ssl-cert-file-with-nix-daemon-and-macos": "installation/env-variables.html#nix_ssl_cert_file", "chap-installation": "installation/index.html", - "ch-installing-binary": "installation/installing-binary.html", - "sect-macos-installation": "installation/installing-binary.html#macos-installation", - "sect-macos-installation-change-store-prefix": "installation/installing-binary.html#macos-installation", - "sect-macos-installation-encrypted-volume": "installation/installing-binary.html#macos-installation", - "sect-macos-installation-recommended-notes": "installation/installing-binary.html#macos-installation", - "sect-macos-installation-symlink": "installation/installing-binary.html#macos-installation", - "sect-multi-user-installation": 
"installation/installing-binary.html#multi-user-installation", - "sect-nix-install-binary-tarball": "installation/installing-binary.html#installing-from-a-binary-tarball", - "sect-nix-install-pinned-version-url": - "installation/installing-binary.html#installing-a-pinned-nix-version-from-a-url", - "sect-single-user-installation": "installation/installing-binary.html#single-user-installation", "ch-installing-source": "installation/installing-source.html", - "ssec-multi-user": "installation/multi-user.html", "ch-nix-security": "installation/nix-security.html", "sec-obtaining-source": "installation/obtaining-source.html", "sec-prerequisites-source": "installation/prerequisites-source.html", - "sec-single-user": "installation/single-user.html", - "ch-supported-platforms": "installation/supported-platforms.html", "ch-upgrading-nix": "installation/upgrading.html", "ch-about-nix": "introduction.html", "chap-introduction": "introduction.html", @@ -287,43 +269,7 @@ "sec-sharing-packages": "package-management/sharing-packages.html", "ssec-ssh-substituter": "package-management/ssh-substituter.html", "chap-quick-start": "quick-start.html", - "sec-relnotes": "release-notes/index.html", - "ch-relnotes-0.10.1": "release-notes/rl-0.10.1.html", - "ch-relnotes-0.10": "release-notes/rl-0.10.html", - "ssec-relnotes-0.11": "release-notes/rl-0.11.html", - "ssec-relnotes-0.12": "release-notes/rl-0.12.html", - "ssec-relnotes-0.13": "release-notes/rl-0.13.html", - "ssec-relnotes-0.14": "release-notes/rl-0.14.html", - "ssec-relnotes-0.15": "release-notes/rl-0.15.html", - "ssec-relnotes-0.16": "release-notes/rl-0.16.html", - "ch-relnotes-0.5": "release-notes/rl-0.5.html", - "ch-relnotes-0.6": "release-notes/rl-0.6.html", - "ch-relnotes-0.7": "release-notes/rl-0.7.html", - "ch-relnotes-0.8.1": "release-notes/rl-0.8.1.html", - "ch-relnotes-0.8": "release-notes/rl-0.8.html", - "ch-relnotes-0.9.1": "release-notes/rl-0.9.1.html", - "ch-relnotes-0.9.2": "release-notes/rl-0.9.2.html", - 
"ch-relnotes-0.9": "release-notes/rl-0.9.html", - "ssec-relnotes-1.0": "release-notes/rl-1.0.html", - "ssec-relnotes-1.1": "release-notes/rl-1.1.html", - "ssec-relnotes-1.10": "release-notes/rl-1.10.html", - "ssec-relnotes-1.11.10": "release-notes/rl-1.11.10.html", - "ssec-relnotes-1.11": "release-notes/rl-1.11.html", - "ssec-relnotes-1.2": "release-notes/rl-1.2.html", - "ssec-relnotes-1.3": "release-notes/rl-1.3.html", - "ssec-relnotes-1.4": "release-notes/rl-1.4.html", - "ssec-relnotes-1.5.1": "release-notes/rl-1.5.html", - "ssec-relnotes-1.5.2": "release-notes/rl-1.5.2.html", - "ssec-relnotes-1.5": "release-notes/rl-1.5.html", - "ssec-relnotes-1.6.1": "release-notes/rl-1.6.1.html", - "ssec-relnotes-1.6.0": "release-notes/rl-1.6.html", - "ssec-relnotes-1.7": "release-notes/rl-1.7.html", - "ssec-relnotes-1.8": "release-notes/rl-1.8.html", - "ssec-relnotes-1.9": "release-notes/rl-1.9.html", - "ssec-relnotes-2.0": "release-notes/rl-2.0.html", - "ssec-relnotes-2.1": "release-notes/rl-2.1.html", - "ssec-relnotes-2.2": "release-notes/rl-2.2.html", - "ssec-relnotes-2.3": "release-notes/rl-2.3.html" + "sec-relnotes": "release-notes/index.html" }, "language/types.html": { "simple-values": "#primitives", @@ -340,12 +286,10 @@ "builder-execution": "../store/building.html#builder-execution" }, "installation/installing-binary.html": { - "linux": "uninstall.html#linux", - "macos": "uninstall.html#macos", "uninstalling": "uninstall.html" }, "development/building.html": { - "nix-with-flakes": "#building-nix-with-flakes", + "nix-with-flakes": "#building-nix", "classic-nix": "#building-nix", "running-tests": "testing.html#running-tests", "unit-tests": "testing.html#unit-tests", diff --git a/doc/manual/rl-next/shorter-build-dir-names.md b/doc/manual/rl-next/shorter-build-dir-names.md new file mode 100644 index 000000000000..e87fa5d04fb8 --- /dev/null +++ b/doc/manual/rl-next/shorter-build-dir-names.md @@ -0,0 +1,6 @@ +--- +synopsis: "Temporary build directories no longer include 
derivation names" +prs: [13839] +--- + +Temporary build directories created during derivation builds no longer include the derivation name in their path to avoid build failures when the derivation name is too long. This change ensures predictable prefix lengths for build directories under `/nix/var/nix/builds`. \ No newline at end of file diff --git a/doc/manual/source/SUMMARY.md.in b/doc/manual/source/SUMMARY.md.in index 18d24788ca1c..c098386827d6 100644 --- a/doc/manual/source/SUMMARY.md.in +++ b/doc/manual/source/SUMMARY.md.in @@ -3,17 +3,12 @@ - [Introduction](introduction.md) - [Quick Start](quick-start.md) - [Installation](installation/index.md) - - [Supported Platforms](installation/supported-platforms.md) - - [Installing a Binary Distribution](installation/installing-binary.md) - [Installing Nix from Source](installation/installing-source.md) - [Prerequisites](installation/prerequisites-source.md) - [Obtaining a Source Distribution](installation/obtaining-source.md) - [Building Nix from Source](installation/building-source.md) - [Using Nix within Docker](installation/installing-docker.md) - [Security](installation/nix-security.md) - - [Single-User Mode](installation/single-user.md) - - [Multi-User Mode](installation/multi-user.md) - - [Environment Variables](installation/env-variables.md) - [Upgrading Nix](installation/upgrading.md) - [Uninstalling Nix](installation/uninstall.md) - [Nix Store](store/index.md) @@ -65,8 +60,11 @@ - [Command Reference](command-ref/index.md) - [Common Options](command-ref/opt-common.md) - [Common Environment Variables](command-ref/env-common.md) - - [Main Commands](command-ref/main-commands.md) + - [Subcommands](command-ref/subcommands.md) +{{#include ./command-ref/new-cli/SUMMARY.md}} + - [Deprecated Commands](command-ref/main-commands.md) - [nix-build](command-ref/nix-build.md) + - [nix-channel](command-ref/nix-channel.md) - [nix-shell](command-ref/nix-shell.md) - [nix-store](command-ref/nix-store.md) - [nix-store 
--add-fixed](command-ref/nix-store/add-fixed.md) @@ -102,22 +100,17 @@ - [nix-env --uninstall](command-ref/nix-env/uninstall.md) - [nix-env --upgrade](command-ref/nix-env/upgrade.md) - [Utilities](command-ref/utilities.md) - - [nix-channel](command-ref/nix-channel.md) - [nix-collect-garbage](command-ref/nix-collect-garbage.md) - [nix-copy-closure](command-ref/nix-copy-closure.md) - [nix-daemon](command-ref/nix-daemon.md) - [nix-hash](command-ref/nix-hash.md) - [nix-instantiate](command-ref/nix-instantiate.md) - [nix-prefetch-url](command-ref/nix-prefetch-url.md) - - [Experimental Commands](command-ref/experimental-commands.md) -{{#include ./command-ref/new-cli/SUMMARY.md}} - [Files](command-ref/files.md) - [nix.conf](command-ref/conf-file.md) - [Profiles](command-ref/files/profiles.md) - [manifest.nix](command-ref/files/manifest.nix.md) - [manifest.json](command-ref/files/manifest.json.md) - - [Channels](command-ref/files/channels.md) - - [Default Nix expression](command-ref/files/default-nix-expression.md) - [Architecture and Design](architecture/architecture.md) - [Formats and Protocols](protocols/index.md) - [JSON Formats](protocols/json/index.md) @@ -138,6 +131,8 @@ - [Nix Cache Info Format](protocols/nix-cache-info.md) - [Derivation "ATerm" file format](protocols/derivation-aterm.md) - [Nix32 Encoding](protocols/nix32.md) + - [`builtins.wasm` Host Interface](protocols/wasm.md) + - [Flake Schemas](protocols/flake-schemas.md) - [C API](c-api.md) - [Glossary](glossary.md) - [Development](development/index.md) @@ -151,7 +146,60 @@ - [C++ style guide](development/cxx.md) - [Experimental Features](development/experimental-features.md) - [Contributing](development/contributing.md) -- [Releases](release-notes/index.md) +- [Determinate Nix Release Notes](release-notes-determinate/index.md) + - [Changes between Nix and Determinate Nix](release-notes-determinate/changes.md) + - [Release 3.18.1 (2026-04-23)](release-notes-determinate/v3.18.1.md) + - [Release 3.18.0 
(2026-04-20)](release-notes-determinate/v3.18.0.md) + - [Release 3.17.3 (2026-04-07)](release-notes-determinate/v3.17.3.md) + - [Release 3.17.2 (2026-03-27)](release-notes-determinate/v3.17.2.md) + - [Release 3.17.1 (2026-03-18)](release-notes-determinate/v3.17.1.md) + - [Release 3.17.0 (2026-03-04)](release-notes-determinate/v3.17.0.md) + - [Release 3.16.3 (2026-02-24)](release-notes-determinate/v3.16.3.md) + - [Release 3.16.2 (2026-02-23)](release-notes-determinate/v3.16.2.md) + - [Release 3.16.1 (2026-02-22)](release-notes-determinate/v3.16.1.md) + - [Release 3.16.0 (2026-02-12)](release-notes-determinate/v3.16.0.md) + - [Release 3.15.2 (2026-01-20)](release-notes-determinate/v3.15.2.md) + - [Release 3.15.1 (2025-12-24)](release-notes-determinate/v3.15.1.md) + - [Release 3.15.0 (2025-12-19)](release-notes-determinate/v3.15.0.md) + - [Release 3.14.0 (2025-12-08)](release-notes-determinate/v3.14.0.md) + - [Release 3.13.2 (2025-11-19)](release-notes-determinate/v3.13.2.md) + - [Release 3.13.1 (2025-11-12)](release-notes-determinate/v3.13.1.md) + - [Release 3.13.0 (2025-11-09)](release-notes-determinate/v3.13.0.md) + - [Release 3.12.2 (2025-11-05)](release-notes-determinate/v3.12.2.md) + - [Release 3.12.1 (2025-11-04)](release-notes-determinate/v3.12.1.md) + - [Release 3.12.0 (2025-10-23)](release-notes-determinate/v3.12.0.md) + - [Release 3.11.3 (2025-10-09)](release-notes-determinate/v3.11.3.md) + - [Release 3.11.2 (2025-09-12)](release-notes-determinate/v3.11.2.md) + - [Release 3.11.1 (2025-09-04)](release-notes-determinate/v3.11.1.md) + - [Release 3.11.0 (2025-09-03)](release-notes-determinate/v3.11.0.md) + - [Release 3.10.1 (2025-09-02)](release-notes-determinate/v3.10.1.md) + - [Release 3.10.0 (2025-09-02)](release-notes-determinate/v3.10.0.md) + - [Release 3.9.1 (2025-08-28)](release-notes-determinate/v3.9.1.md) + - [Release 3.9.0 (2025-08-26)](release-notes-determinate/v3.9.0.md) + - [Release 3.8.6 (2025-08-19)](release-notes-determinate/v3.8.6.md) + - 
[Release 3.8.5 (2025-08-04)](release-notes-determinate/rl-3.8.5.md) + - [Release 3.8.4 (2025-07-21)](release-notes-determinate/rl-3.8.4.md) + - [Release 3.8.3 (2025-07-18)](release-notes-determinate/rl-3.8.3.md) + - [Release 3.8.2 (2025-07-12)](release-notes-determinate/rl-3.8.2.md) + - [Release 3.8.1 (2025-07-11)](release-notes-determinate/rl-3.8.1.md) + - [Release 3.8.0 (2025-07-10)](release-notes-determinate/rl-3.8.0.md) + - [Release 3.7.0 (2025-07-03)](release-notes-determinate/rl-3.7.0.md) + - [Release 3.6.8 (2025-06-25)](release-notes-determinate/rl-3.6.8.md) + - [Release 3.6.7 (2025-06-24)](release-notes-determinate/rl-3.6.7.md) + - [Release 3.6.6 (2025-06-17)](release-notes-determinate/rl-3.6.6.md) + - [Release 3.6.5 (2025-06-16)](release-notes-determinate/rl-3.6.5.md) + - [Release 3.6.2 (2025-06-02)](release-notes-determinate/rl-3.6.2.md) + - [Release 3.6.1 (2025-05-24)](release-notes-determinate/rl-3.6.1.md) + - [Release 3.6.0 (2025-05-22)](release-notes-determinate/rl-3.6.0.md) + - [Release 3.5.2 (2025-05-12)](release-notes-determinate/rl-3.5.2.md) + - [Release 3.5.1 (2025-05-09)](release-notes-determinate/rl-3.5.1.md) + - [~~Release 3.5.0 (2025-05-09)~~](release-notes-determinate/rl-3.5.0.md) + - [Release 3.4.2 (2025-05-05)](release-notes-determinate/rl-3.4.2.md) + - [Release 3.4.0 (2025-04-25)](release-notes-determinate/rl-3.4.0.md) + - [Release 3.3.0 (2025-04-11)](release-notes-determinate/rl-3.3.0.md) + - [Release 3.1.0 (2025-03-27)](release-notes-determinate/rl-3.1.0.md) + - [Release 3.0.0 (2025-03-04)](release-notes-determinate/rl-3.0.0.md) +- [Nix Release Notes](release-notes/index.md) {{#include ./SUMMARY-rl-next.md}} - [Release 2.34 (2026-02-27)](release-notes/rl-2.34.md) - [Release 2.33 (2025-12-09)](release-notes/rl-2.33.md) @@ -162,60 +210,3 @@ - [Release 2.28 (2025-04-02)](release-notes/rl-2.28.md) - [Release 2.27 (2025-03-03)](release-notes/rl-2.27.md) - [Release 2.26 (2025-01-22)](release-notes/rl-2.26.md) - - [Release 2.25 
(2024-11-07)](release-notes/rl-2.25.md) - - [Release 2.24 (2024-07-31)](release-notes/rl-2.24.md) - - [Release 2.23 (2024-06-03)](release-notes/rl-2.23.md) - - [Release 2.22 (2024-04-23)](release-notes/rl-2.22.md) - - [Release 2.21 (2024-03-11)](release-notes/rl-2.21.md) - - [Release 2.20 (2024-01-29)](release-notes/rl-2.20.md) - - [Release 2.19 (2023-11-17)](release-notes/rl-2.19.md) - - [Release 2.18 (2023-09-20)](release-notes/rl-2.18.md) - - [Release 2.17 (2023-07-24)](release-notes/rl-2.17.md) - - [Release 2.16 (2023-05-31)](release-notes/rl-2.16.md) - - [Release 2.15 (2023-04-11)](release-notes/rl-2.15.md) - - [Release 2.14 (2023-02-28)](release-notes/rl-2.14.md) - - [Release 2.13 (2023-01-17)](release-notes/rl-2.13.md) - - [Release 2.12 (2022-12-06)](release-notes/rl-2.12.md) - - [Release 2.11 (2022-08-25)](release-notes/rl-2.11.md) - - [Release 2.10 (2022-07-11)](release-notes/rl-2.10.md) - - [Release 2.9 (2022-05-30)](release-notes/rl-2.9.md) - - [Release 2.8 (2022-04-19)](release-notes/rl-2.8.md) - - [Release 2.7 (2022-03-07)](release-notes/rl-2.7.md) - - [Release 2.6 (2022-01-24)](release-notes/rl-2.6.md) - - [Release 2.5 (2021-12-13)](release-notes/rl-2.5.md) - - [Release 2.4 (2021-11-01)](release-notes/rl-2.4.md) - - [Release 2.3 (2019-09-04)](release-notes/rl-2.3.md) - - [Release 2.2 (2019-01-11)](release-notes/rl-2.2.md) - - [Release 2.1 (2018-09-02)](release-notes/rl-2.1.md) - - [Release 2.0 (2018-02-22)](release-notes/rl-2.0.md) - - [Release 1.11.10 (2017-06-12)](release-notes/rl-1.11.10.md) - - [Release 1.11 (2016-01-19)](release-notes/rl-1.11.md) - - [Release 1.10 (2015-09-03)](release-notes/rl-1.10.md) - - [Release 1.9 (2015-06-12)](release-notes/rl-1.9.md) - - [Release 1.8 (2014-12-14)](release-notes/rl-1.8.md) - - [Release 1.7 (2014-04-11)](release-notes/rl-1.7.md) - - [Release 1.6.1 (2013-10-28)](release-notes/rl-1.6.1.md) - - [Release 1.6 (2013-09-10)](release-notes/rl-1.6.md) - - [Release 1.5.2 (2013-05-13)](release-notes/rl-1.5.2.md) - - 
[Release 1.5 (2013-02-27)](release-notes/rl-1.5.md) - - [Release 1.4 (2013-02-26)](release-notes/rl-1.4.md) - - [Release 1.3 (2013-01-04)](release-notes/rl-1.3.md) - - [Release 1.2 (2012-12-06)](release-notes/rl-1.2.md) - - [Release 1.1 (2012-07-18)](release-notes/rl-1.1.md) - - [Release 1.0 (2012-05-11)](release-notes/rl-1.0.md) - - [Release 0.16 (2010-08-17)](release-notes/rl-0.16.md) - - [Release 0.15 (2010-03-17)](release-notes/rl-0.15.md) - - [Release 0.14 (2010-02-04)](release-notes/rl-0.14.md) - - [Release 0.13 (2009-11-05)](release-notes/rl-0.13.md) - - [Release 0.12 (2008-11-20)](release-notes/rl-0.12.md) - - [Release 0.11 (2007-12-31)](release-notes/rl-0.11.md) - - [Release 0.10.1 (2006-10-11)](release-notes/rl-0.10.1.md) - - [Release 0.10 (2006-10-06)](release-notes/rl-0.10.md) - - [Release 0.9.2 (2005-09-21)](release-notes/rl-0.9.2.md) - - [Release 0.9.1 (2005-09-20)](release-notes/rl-0.9.1.md) - - [Release 0.9 (2005-09-16)](release-notes/rl-0.9.md) - - [Release 0.8.1 (2005-04-13)](release-notes/rl-0.8.1.md) - - [Release 0.8 (2005-04-11)](release-notes/rl-0.8.md) - - [Release 0.7 (2005-01-12)](release-notes/rl-0.7.md) - - [Release 0.6 (2004-11-14)](release-notes/rl-0.6.md) - - [Release 0.5 and earlier](release-notes/rl-0.5.md) diff --git a/doc/manual/source/advanced-topics/distributed-builds.md b/doc/manual/source/advanced-topics/distributed-builds.md index 08a980643e88..c39cf4500795 100644 --- a/doc/manual/source/advanced-topics/distributed-builds.md +++ b/doc/manual/source/advanced-topics/distributed-builds.md @@ -5,8 +5,8 @@ this allows multiple builds to be performed in parallel. Remote builds also allow Nix to perform multi-platform builds in a semi-transparent way. For example, if you perform a build for a -`x86_64-darwin` on an `i686-linux` machine, Nix can automatically -forward the build to a `x86_64-darwin` machine, if one is available. 
+`aarch64-darwin` on an `x86_64-linux` machine, Nix can automatically +forward the build to a `aarch64-darwin` machine, if one is available. ## Requirements @@ -59,7 +59,7 @@ then you need to ensure that the `PATH` of non-interactive login shells contains Nix. The [list of remote build machines](@docroot@/command-ref/conf-file.md#conf-builders) can be specified on the command line or in the Nix configuration file. -For example, the following command allows you to build a derivation for `x86_64-darwin` on a Linux machine: +For example, the following command allows you to build a derivation for `aarch64-darwin` on a Linux machine: ```console uname @@ -71,8 +71,8 @@ Linux ```console nix build --impure \ - --expr '(with import { system = "x86_64-darwin"; }; runCommand "foo" {} "uname > $out")' \ - --builders 'ssh://mac x86_64-darwin' + --expr '(with import { system = "aarch64-darwin"; }; runCommand "foo" {} "uname > $out")' \ + --builders 'ssh://mac aarch64-darwin' ``` ```console @@ -90,12 +90,12 @@ Darwin It is possible to specify multiple build machines separated by a semicolon or a newline, e.g. ```console - --builders 'ssh://mac x86_64-darwin ; ssh://beastie x86_64-freebsd' + --builders 'ssh://mac aarch64-darwin ; ssh://beastie x86_64-freebsd' ``` Remote build machines can also be configured in [`nix.conf`](@docroot@/command-ref/conf-file.md), e.g. - builders = ssh://mac x86_64-darwin ; ssh://beastie x86_64-freebsd + builders = ssh://mac aarch64-darwin ; ssh://beastie x86_64-freebsd After making changes to `nix.conf`, restart the Nix daemon for changes to take effect. @@ -107,4 +107,4 @@ file included in `builders` via the syntax `@/path/to/file`. For example, causes the list of machines in `/etc/nix/machines` to be included. (This is the default.) 
-[Nix instance]: @docroot@/glossary.md#gloss-nix-instance \ No newline at end of file +[Nix instance]: @docroot@/glossary.md#gloss-nix-instance diff --git a/doc/manual/source/command-ref/env-common.md b/doc/manual/source/command-ref/env-common.md index 7be064a13804..35d682949ba8 100644 --- a/doc/manual/source/command-ref/env-common.md +++ b/doc/manual/source/command-ref/env-common.md @@ -97,7 +97,7 @@ Most Nix commands interpret the following environment variables: This variable should be set to `daemon` if you want to use the Nix daemon to execute Nix operations. This is necessary in [multi-user - Nix installations](@docroot@/installation/multi-user.md). If the Nix + Nix installations](@docroot@/installation/nix-security.md#multi-user-model). If the Nix daemon's Unix socket is at some non-standard path, this variable should be set to `unix://path/to/socket`. Otherwise, it should be left unset. diff --git a/doc/manual/source/command-ref/experimental-commands.md b/doc/manual/source/command-ref/experimental-commands.md deleted file mode 100644 index 1190729a2305..000000000000 --- a/doc/manual/source/command-ref/experimental-commands.md +++ /dev/null @@ -1,8 +0,0 @@ -# Experimental Commands - -This section lists [experimental commands](@docroot@/development/experimental-features.md#xp-feature-nix-command). - -> **Warning** -> -> These commands may be removed in the future, or their syntax may -> change in incompatible ways. diff --git a/doc/manual/source/command-ref/files/default-nix-expression.md b/doc/manual/source/command-ref/files/default-nix-expression.md index cd73b8e58ab7..66ce84b48f8b 100644 --- a/doc/manual/source/command-ref/files/default-nix-expression.md +++ b/doc/manual/source/command-ref/files/default-nix-expression.md @@ -31,12 +31,12 @@ Then, the resulting expression is interpreted like this: The file [`manifest.nix`](@docroot@/command-ref/files/manifest.nix.md) is always ignored. 
-The command [`nix-channel`] places a symlink to the current user's [channels] in this directory, the [user channel link](#user-channel-link). +The command [`nix-channel`] places a symlink to the current user's channels in this directory, the [user channel link](#user-channel-link). This makes all subscribed channels available as attributes in the default expression. ## User channel link -A symlink that ensures that [`nix-env`] can find the current user's [channels]: +A symlink that ensures that [`nix-env`] can find the current user's channels: - `~/.nix-defexpr/channels` - `$XDG_STATE_HOME/nix/defexpr/channels` if [`use-xdg-base-directories`] is set to `true`. @@ -51,4 +51,3 @@ In a multi-user installation, you may also have `~/.nix-defexpr/channels_root`, [`nix-channel`]: @docroot@/command-ref/nix-channel.md [`nix-env`]: @docroot@/command-ref/nix-env.md [`use-xdg-base-directories`]: @docroot@/command-ref/conf-file.md#conf-use-xdg-base-directories -[channels]: @docroot@/command-ref/files/channels.md diff --git a/doc/manual/source/command-ref/files/profiles.md b/doc/manual/source/command-ref/files/profiles.md index 4f022acceb3e..f137336747f1 100644 --- a/doc/manual/source/command-ref/files/profiles.md +++ b/doc/manual/source/command-ref/files/profiles.md @@ -67,7 +67,7 @@ By default, this symlink points to: - `$NIX_STATE_DIR/profiles/per-user/root/profile` for `root` The `PATH` environment variable should include `/bin` subdirectory of the profile link (e.g. `~/.nix-profile/bin`) for the user environment to be visible to the user. -The [installer](@docroot@/installation/installing-binary.md) sets this up by default, unless you enable [`use-xdg-base-directories`]. +The installer sets this up by default, unless you enable [`use-xdg-base-directories`]. 
[`nix-env`]: @docroot@/command-ref/nix-env.md [`nix profile`]: @docroot@/command-ref/new-cli/nix3-profile.md diff --git a/doc/manual/source/command-ref/nix-channel.md b/doc/manual/source/command-ref/nix-channel.md index 865f43ccce5b..59817be974b5 100644 --- a/doc/manual/source/command-ref/nix-channel.md +++ b/doc/manual/source/command-ref/nix-channel.md @@ -8,6 +8,12 @@ # Description +> **Warning** +> +> nix-channel is deprecated in favor of flakes in Determinate Nix. +> For a guide on Nix flakes, see: . +> For details and to offer feedback on the deprecation process, see: . + Channels are a mechanism for referencing remote Nix expressions and conveniently retrieving their latest version. The moving parts of channels are: diff --git a/doc/manual/source/command-ref/nix-env.md b/doc/manual/source/command-ref/nix-env.md index bda02149ed06..d01caaf7f787 100644 --- a/doc/manual/source/command-ref/nix-env.md +++ b/doc/manual/source/command-ref/nix-env.md @@ -52,7 +52,7 @@ These pages can be viewed offline: `nix-env` can obtain packages from multiple sources: - An attribute set of derivations from: - - The [default Nix expression](@docroot@/command-ref/files/default-nix-expression.md) (by default) + - The default Nix expression (by default) - A Nix file, specified via `--file` - A [profile](@docroot@/command-ref/files/profiles.md), specified via `--from-profile` - A Nix expression that is a function which takes default expression as argument, specified via `--from-expression` diff --git a/doc/manual/source/command-ref/nix-env/install.md b/doc/manual/source/command-ref/nix-env/install.md index 06aeb3925628..320fa530fdaa 100644 --- a/doc/manual/source/command-ref/nix-env/install.md +++ b/doc/manual/source/command-ref/nix-env/install.md @@ -22,12 +22,11 @@ It is based on the current generation of the active [profile](@docroot@/command- The arguments *args* map to store paths in a number of possible ways: -- By default, *args* is a set of names denoting derivations in the 
[default Nix expression]. +- By default, *args* is a set of names denoting derivations in the default Nix expression. These are [realised], and the resulting output paths are installed. Currently installed derivations with a name equal to the name of a derivation being added are removed unless the option `--preserve-installed` is specified. [derivation expression]: @docroot@/glossary.md#gloss-derivation-expression - [default Nix expression]: @docroot@/command-ref/files/default-nix-expression.md [realised]: @docroot@/glossary.md#gloss-realise If there are multiple derivations matching a name in *args* that @@ -45,7 +44,7 @@ The arguments *args* map to store paths in a number of possible ways: gcc-3.3.6 gcc-4.1.1` will install both version of GCC (and will probably cause a user environment conflict\!). -- If [`--attr`](#opt-attr) / `-A` is specified, the arguments are *attribute paths* that select attributes from the [default Nix expression]. +- If [`--attr`](#opt-attr) / `-A` is specified, the arguments are *attribute paths* that select attributes from the default Nix expression. This is faster than using derivation names and unambiguous. Show the attribute paths of available packages with [`nix-env --query`](./query.md): @@ -58,7 +57,7 @@ The arguments *args* map to store paths in a number of possible ways: easy way to copy user environment elements from one profile to another. -- If `--from-expression` is given, *args* are [Nix language functions](@docroot@/language/syntax.md#functions) that are called with the [default Nix expression] as their single argument. +- If `--from-expression` is given, *args* are [Nix language functions](@docroot@/language/syntax.md#functions) that are called with the default Nix expression as their single argument. The derivations returned by those function calls are installed. This allows derivations to be specified in an unambiguous way, which is necessary if there are multiple derivations with the same name. 
diff --git a/doc/manual/source/command-ref/nix-store/query.md b/doc/manual/source/command-ref/nix-store/query.md index aeb696535989..cc45eeb74cf1 100644 --- a/doc/manual/source/command-ref/nix-store/query.md +++ b/doc/manual/source/command-ref/nix-store/query.md @@ -103,6 +103,13 @@ symlink. example when *paths* were substituted from a binary cache. Use `--valid-derivers` instead to obtain valid paths only. + > **Note** + > + > `nix-store --query --deriver` is replaced with the following `nix` command: + > + > nix path-info --json ... | jq -r '.[].deriver' + + [deriver]: @docroot@/glossary.md#gloss-deriver - `--valid-derivers` diff --git a/doc/manual/source/command-ref/subcommands.md b/doc/manual/source/command-ref/subcommands.md new file mode 100644 index 000000000000..6a26732338d1 --- /dev/null +++ b/doc/manual/source/command-ref/subcommands.md @@ -0,0 +1,3 @@ +# Subcommands + +This section lists all the subcommands of the `nix` CLI. diff --git a/doc/manual/source/development/building.md b/doc/manual/source/development/building.md index 742170f76c6c..917e39e1ca5f 100644 --- a/doc/manual/source/development/building.md +++ b/doc/manual/source/development/building.md @@ -1,77 +1,9 @@ # Building Nix -This section provides some notes on how to start hacking on Nix. -To get the latest version of Nix from GitHub: - > **Note** > > When checking out the repo on Windows, make sure you have the git setting `core.symlinks` enabled, before cloning, as there are symlinks in the repo. -```console -$ git clone https://github.com/NixOS/nix.git -$ cd nix -``` - -> **Note** -> -> The following instructions assume you already have some version of Nix installed locally, so that you can use it to set up the development environment. -> If you don't have it installed, follow the [installation instructions](../installation/index.md). 
- - -To build all dependencies and start a shell in which all environment variables are set up so that those dependencies can be found: - -```console -$ nix-shell -``` - -To get a shell with one of the other [supported compilation environments](#compilation-environments): - -```console -$ nix-shell --attr devShells.x86_64-linux.native-clangStdenv -``` - -> **Note** -> -> You can use `native-ccacheStdenv` to drastically improve rebuild time. -> By default, [ccache](https://ccache.dev) keeps artifacts in `~/.cache/ccache/`. - -To build Nix itself in this shell: - -```console -[nix-shell]$ out="$(pwd)/outputs/out" dev=$out debug=$out mesonFlags+=" --prefix=${out}" -[nix-shell]$ dontAddPrefix=1 configurePhase -[nix-shell]$ buildPhase -``` - -To test it: - -```console -[nix-shell]$ checkPhase -``` - -To install it in `$(pwd)/outputs`: - -```console -[nix-shell]$ installPhase -[nix-shell]$ ./outputs/out/bin/nix --version -nix (Nix) 2.12 -``` - -To build a release version of Nix for the current operating system and CPU architecture: - -```console -$ nix-build -``` - -You can also build Nix for one of the [supported platforms](#platforms). - -## Building Nix with flakes - -This section assumes you are using Nix with the [`flakes`] and [`nix-command`] experimental features enabled. - -[`flakes`]: @docroot@/development/experimental-features.md#xp-feature-flakes -[`nix-command`]: @docroot@/development/experimental-features.md#xp-feature-nix-command - To build all dependencies and start a shell in which all environment variables are set up so that those dependencies can be found: ```console @@ -130,8 +62,6 @@ Nix can be built for various platforms, as specified in [`flake.nix`]: [`flake.nix`]: https://github.com/nixos/nix/blob/master/flake.nix - `x86_64-linux` -- `x86_64-darwin` -- `i686-linux` - `aarch64-linux` - `aarch64-darwin` - `armv6l-linux` @@ -149,12 +79,6 @@ platform. 
Common solutions include [remote build machines] and [binary format em Given such a setup, executing the build only requires selecting the respective attribute. For example, to compile for `aarch64-linux`: -```console -$ nix-build --attr packages.aarch64-linux.default -``` - -or for Nix with the [`flakes`] and [`nix-command`] experimental features enabled: - ```console $ nix build .#packages.aarch64-linux.default ``` @@ -223,6 +147,7 @@ For historic reasons and backward-compatibility, some CPU and OS identifiers are |-----------------------------|-------------------------|---------------------| | `x86` | | `i686` | | `arm` | | `host_machine.cpu()`| +| `arm64` | | `host_machine.cpu()`| | `ppc` | `little` | `powerpcle` | | `ppc64` | `little` | `powerpc64le` | | `ppc` | `big` | `powerpc` | @@ -247,20 +172,12 @@ To build with one of those environments, you can use $ nix build .#nix-cli-ccacheStdenv ``` -for flake-enabled Nix, or - -```console -$ nix-build --attr nix-cli-ccacheStdenv -``` - -for classic Nix. - You can use any of the other supported environments in place of `nix-cli-ccacheStdenv`. ## Editor integration The `clangd` LSP server is installed by default on the `clang`-based `devShell`s. -See [supported compilation environments](#compilation-environments) and instructions how to set up a shell [with flakes](#building-nix-with-flakes) or in [classic Nix](#building-nix). +See [supported compilation environments](#compilation-environments) and instructions how to [set up a shell](#building-nix). To use the LSP with your editor, you will want a `compile_commands.json` file telling `clangd` how we are compiling the code. Meson's configure always produces this inside the build directory. 
diff --git a/doc/manual/source/development/experimental-features.md b/doc/manual/source/development/experimental-features.md index ad5cffa91ee5..56a45b23890a 100644 --- a/doc/manual/source/development/experimental-features.md +++ b/doc/manual/source/development/experimental-features.md @@ -6,7 +6,7 @@ Experimental features are considered unstable, which means that they can be chan Users must explicitly enable them by toggling the associated [experimental feature flags](@docroot@/command-ref/conf-file.md#conf-experimental-features). This allows accessing unstable functionality without unwittingly relying on it. -Experimental feature flags were first introduced in [Nix 2.4](@docroot@/release-notes/rl-2.4.md). +Experimental feature flags were first introduced in [Nix 2.4](https://nix.dev/manual/nix/latest/release-notes/rl-2.4). Before that, Nix did have experimental features, but they were not guarded by flags and were merely documented as unstable. This was a source of confusion and controversy. diff --git a/doc/manual/source/development/testing.md b/doc/manual/source/development/testing.md index dd965862a34c..35654d163935 100644 --- a/doc/manual/source/development/testing.md +++ b/doc/manual/source/development/testing.md @@ -325,7 +325,6 @@ Creating a Cachix cache for your installer tests and adding its authorisation to - `x86_64-linux` - `armv6l-linux` - `armv7l-linux` - - `x86_64-darwin` - The `installer_test` job (which runs on `ubuntu-24.04` and `macos-14`) will try to install Nix with the cached installer and run a trivial Nix command. 
diff --git a/doc/manual/source/favicon.png b/doc/manual/source/favicon.png deleted file mode 100644 index 1ed2b5fe0fdf..000000000000 Binary files a/doc/manual/source/favicon.png and /dev/null differ diff --git a/doc/manual/source/favicon.svg b/doc/manual/source/favicon.svg index 1d2a6e835d5f..55fb9479b06e 100644 --- a/doc/manual/source/favicon.svg +++ b/doc/manual/source/favicon.svg @@ -1 +1,29 @@ - \ No newline at end of file + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/doc/manual/source/glossary.md b/doc/manual/source/glossary.md index af5b45314d1d..64ca1cf5e167 100644 --- a/doc/manual/source/glossary.md +++ b/doc/manual/source/glossary.md @@ -353,14 +353,6 @@ See [Nix Archive](store/file-system-object/content-address.html#serial-nix-archive) for details. -- [`∅`]{#gloss-empty-set} - - The empty set symbol. In the context of profile history, this denotes a package is not present in a particular version of the profile. - -- [`ε`]{#gloss-epsilon} - - The epsilon symbol. In the context of a package, this means the version is empty. More precisely, the derivation does not have a version attribute. - - [package]{#package} A software package; files that belong together for a particular purpose, and metadata. diff --git a/doc/manual/source/installation/env-variables.md b/doc/manual/source/installation/env-variables.md deleted file mode 100644 index 0350904211ac..000000000000 --- a/doc/manual/source/installation/env-variables.md +++ /dev/null @@ -1,62 +0,0 @@ -# Environment Variables - -To use Nix, some environment variables should be set. In particular, -`PATH` should contain the directories `prefix/bin` and -`~/.nix-profile/bin`. The first directory contains the Nix tools -themselves, while `~/.nix-profile` is a symbolic link to the current -*user environment* (an automatically generated package consisting of -symlinks to installed packages). 
The simplest way to set the required -environment variables is to include the file -`prefix/etc/profile.d/nix.sh` in your `~/.profile` (or similar), like -this: - -```bash -source prefix/etc/profile.d/nix.sh -``` - -# `NIX_SSL_CERT_FILE` - -If you need to specify a custom certificate bundle to account for an -HTTPS-intercepting man in the middle proxy, you must specify the path to -the certificate bundle in the environment variable `NIX_SSL_CERT_FILE`. - -If you don't specify a `NIX_SSL_CERT_FILE` manually, Nix will install -and use its own certificate bundle. - -Set the environment variable and install Nix - -```console -$ export NIX_SSL_CERT_FILE=/etc/ssl/my-certificate-bundle.crt -$ curl -L https://nixos.org/nix/install | sh -``` - -In the shell profile and rc files (for example, `/etc/bashrc`, -`/etc/zshrc`), add the following line: - -```bash -export NIX_SSL_CERT_FILE=/etc/ssl/my-certificate-bundle.crt -``` - -> **Note** -> -> You must not add the export and then do the install, as the Nix -> installer will detect the presence of Nix configuration, and abort. - -If you use the Nix daemon, you should also add the following to -`/etc/nix/nix.conf`: - -``` -ssl-cert-file = /etc/ssl/my-certificate-bundle.crt -``` - -## Proxy Environment Variables - -The Nix installer has special handling for these proxy-related -environment variables: `http_proxy`, `https_proxy`, `ftp_proxy`, -`all_proxy`, `no_proxy`, `HTTP_PROXY`, `HTTPS_PROXY`, `FTP_PROXY`, -`ALL_PROXY`, `NO_PROXY`. - -If any of these variables are set when running the Nix installer, then -the installer will create an override file at -`/etc/systemd/system/nix-daemon.service.d/override.conf` so `nix-daemon` -will use them. 
diff --git a/doc/manual/source/installation/index.md b/doc/manual/source/installation/index.md index 3c09f103184a..aded684b0b59 100644 --- a/doc/manual/source/installation/index.md +++ b/doc/manual/source/installation/index.md @@ -1,44 +1,11 @@ # Installation -This section describes how to install and configure Nix for first-time use. - -The current recommended option on Linux and MacOS is [multi-user](#multi-user). - -## Multi-user - -This installation offers better sharing, improved isolation, and more security -over a single user installation. - -This option requires either: - -* Linux running systemd, with SELinux disabled -* MacOS - -> **Updating to macOS 15 Sequoia** -> -> If you recently updated to macOS 15 Sequoia and are getting -> ```console -> error: the user '_nixbld1' in the group 'nixbld' does not exist -> ``` -> when running Nix commands, refer to GitHub issue [NixOS/nix#10892](https://github.com/NixOS/nix/issues/10892) for instructions to fix your installation without reinstalling. +We recommend that macOS users install Determinate Nix using our graphical installer, [Determinate.pkg][pkg]. +For Linux and Windows Subsystem for Linux (WSL) users: ```console -$ curl -L https://nixos.org/nix/install | sh -s -- --daemon -``` - -## Single-user - -> Single-user is not supported on Mac. - -> `warning: installing Nix as root is not supported by this script!` - -This installation has less requirements than the multi-user install, however it -cannot offer equivalent sharing, isolation, or security. - -This option is suitable for systems without systemd. - -```console -$ curl -L https://nixos.org/nix/install | sh -s -- --no-daemon +curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | \ + sh -s -- install ``` ## Distributions @@ -46,3 +13,5 @@ $ curl -L https://nixos.org/nix/install | sh -s -- --no-daemon The Nix community maintains installers for several distributions. 
They can be found in the [`nix-community/nix-installers`](https://github.com/nix-community/nix-installers) repository. + +[pkg]: https://install.determinate.systems/determinate-pkg/stable/Universal diff --git a/doc/manual/source/installation/installing-binary.md b/doc/manual/source/installation/installing-binary.md deleted file mode 100644 index 21c15637437d..000000000000 --- a/doc/manual/source/installation/installing-binary.md +++ /dev/null @@ -1,158 +0,0 @@ -# Installing a Binary Distribution - -> **Updating to macOS 15 Sequoia** -> -> If you recently updated to macOS 15 Sequoia and are getting -> ```console -> error: the user '_nixbld1' in the group 'nixbld' does not exist -> ``` -> when running Nix commands, refer to GitHub issue [NixOS/nix#10892](https://github.com/NixOS/nix/issues/10892) for instructions to fix your installation without reinstalling. - -To install the latest version Nix, run the following command: - -```console -$ curl -L https://nixos.org/nix/install | sh -``` - -This performs the default type of installation for your platform: - -- [Multi-user](#multi-user-installation): - - Linux with systemd and without SELinux - - macOS -- [Single-user](#single-user-installation): - - Linux without systemd - - Linux with SELinux - -We recommend the multi-user installation if it supports your platform and you can authenticate with `sudo`. - -The installer can be configured with various command line arguments and environment variables. -To show available command line flags: - -```console -$ curl -L https://nixos.org/nix/install | sh -s -- --help -``` - -To check what it does and how it can be customised further, [download and edit the second-stage installation script](#installing-from-a-binary-tarball). - -# Installing a pinned Nix version from a URL - -Version-specific installation URLs for all Nix versions since 1.11.16 can be found at [releases.nixos.org](https://releases.nixos.org/?prefix=nix/). 
-The directory for each version contains the corresponding SHA-256 hash. - -All installation scripts are invoked the same way: - -```console -$ export VERSION=2.19.2 -$ curl -L https://releases.nixos.org/nix/nix-$VERSION/install | sh -``` - -# Multi User Installation - -The multi-user Nix installation creates system users and a system service for the Nix daemon. - -Supported systems: - -- Linux running systemd, with SELinux disabled -- macOS - -To explicitly instruct the installer to perform a multi-user installation on your system: - -```console -$ bash <(curl -L https://nixos.org/nix/install) --daemon -``` - -You can run this under your usual user account or `root`. -The script will invoke `sudo` as needed. - -# Single User Installation - -To explicitly select a single-user installation on your system: - -```console -$ bash <(curl -L https://nixos.org/nix/install) --no-daemon -``` - -In a single-user installation, `/nix` is owned by the invoking user. -The script will invoke `sudo` to create `/nix` if it doesn’t already exist. -If you don’t have `sudo`, manually create `/nix` as `root`: - -```console -$ su root -# mkdir /nix -# chown alice /nix -``` - -# Installing from a binary tarball - -You can also download a binary tarball that contains Nix and all its dependencies: -- Choose a [version](https://releases.nixos.org/?prefix=nix/) and [system type](../development/building.md#platforms) -- Download and unpack the tarball -- Run the installer - -> **Example** -> -> ```console -> $ pushd $(mktemp -d) -> $ export VERSION=2.19.2 -> $ export SYSTEM=x86_64-linux -> $ curl -LO https://releases.nixos.org/nix/nix-$VERSION/nix-$VERSION-$SYSTEM.tar.xz -> $ tar xfj nix-$VERSION-$SYSTEM.tar.xz -> $ cd nix-$VERSION-$SYSTEM -> $ ./install -> $ popd -> ``` - -The installer can be customised with the environment variables declared in the file named `install-multi-user`. 
- -## Native packages for Linux distributions - -The Nix community maintains installers for some Linux distributions in their native packaging format(https://nix-community.github.io/nix-installers/). - -# macOS Installation - - -[]{#sect-macos-installation-change-store-prefix}[]{#sect-macos-installation-encrypted-volume}[]{#sect-macos-installation-symlink}[]{#sect-macos-installation-recommended-notes} - -We believe we have ironed out how to cleanly support the read-only root file system -on modern macOS. New installs will do this automatically. - -This section previously detailed the situation, options, and trade-offs, -but it now only outlines what the installer does. You don't need to know -this to run the installer, but it may help if you run into trouble: - -- create a new APFS volume for your Nix store -- update `/etc/synthetic.conf` to direct macOS to create a "synthetic" - empty root directory to mount your volume -- specify mount options for the volume in `/etc/fstab` - - `rw`: read-write - - `noauto`: prevent the system from auto-mounting the volume (so the - LaunchDaemon mentioned below can control mounting it, and to avoid - masking problems with that mounting service). - - `nobrowse`: prevent the Nix Store volume from showing up on your - desktop; also keeps Spotlight from spending resources to index - this volume - -- if you have FileVault enabled - - generate an encryption password - - put it in your system Keychain - - use it to encrypt the volume -- create a system LaunchDaemon to mount this volume early enough in the - boot process to avoid problems loading or restoring any programs that - need access to your Nix store - diff --git a/doc/manual/source/installation/nix-security.md b/doc/manual/source/installation/nix-security.md index 1e9036b68b21..61cad24c2b3b 100644 --- a/doc/manual/source/installation/nix-security.md +++ b/doc/manual/source/installation/nix-security.md @@ -1,15 +1,85 @@ # Security -Nix has two basic security models. 
First, it can be used in “single-user -mode”, which is similar to what most other package management tools do: -there is a single user (typically root) who performs all package -management operations. All other users can then use the installed -packages, but they cannot perform package management operations -themselves. - -Alternatively, you can configure Nix in “multi-user mode”. In this -model, all users can perform package management operations — for -instance, every user can install software without requiring root -privileges. Nix ensures that this is secure. For instance, it’s not -possible for one user to overwrite a package used by another user with a -Trojan horse. +Nix follows a [**multi-user**](#multi-user-model) security model in which all +users can perform package management operations. Every user can, for example, +install software without requiring root privileges, and Nix ensures that this +is secure. It's *not* possible for one user to, for example, overwrite a +package used by another user with a Trojan horse. + +## Multi-User model + +To allow a Nix store to be shared safely among multiple users, it is +important that users are not able to run builders that modify the Nix +store or database in arbitrary ways, or that interfere with builds +started by other users. If they could do so, they could install a Trojan +horse in some package and compromise the accounts of other users. + +To prevent this, the Nix store and database are owned by some privileged +user (usually `root`) and builders are executed under special user +accounts (usually named `nixbld1`, `nixbld2`, etc.). When an unprivileged +user runs a Nix command, actions that operate on the Nix store (such as +builds) are forwarded to a *Nix daemon* running under the owner of the +Nix store/database that performs the operation. 
+ +> **Note** +> +> Multi-user mode has one important limitation: only root and a set of +> trusted users specified in `nix.conf` can specify arbitrary binary +> caches. So while unprivileged users may install packages from +> arbitrary Nix expressions, they may not get pre-built binaries. + +### Setting up the build users + +The *build users* are the special UIDs under which builds are performed. +They should all be members of the *build users group* `nixbld`. This +group should have no other members. The build users should not be +members of any other group. On Linux, you can create the group and users +as follows: + +```console +$ groupadd -r nixbld +$ for n in $(seq 1 10); do useradd -c "Nix build user $n" \ + -d /var/empty -g nixbld -G nixbld -M -N -r -s "$(which nologin)" \ + nixbld$n; done +``` + +This creates 10 build users. There can never be more concurrent builds +than the number of build users, so you may want to increase this if you +expect to do many builds at the same time. + +### Running the daemon + +The [Nix daemon](../command-ref/nix-daemon.md) should be started as +follows (as `root`): + +```console +$ nix-daemon +``` + +You’ll want to put that line somewhere in your system’s boot scripts. + +To let unprivileged users use the daemon, they should set the +[`NIX_REMOTE` environment variable](../command-ref/env-common.md) to +`daemon`. So you should put a line like + +```console +export NIX_REMOTE=daemon +``` + +into the users’ login scripts. + +### Restricting access + +To limit which users can perform Nix operations, you can use the +permissions on the directory `/nix/var/nix/daemon-socket`. 
For instance, +if you want to restrict the use of Nix to the members of a group called +`nix-users`, do + +```console +$ chgrp nix-users /nix/var/nix/daemon-socket +$ chmod ug=rwx,o= /nix/var/nix/daemon-socket +``` + +This way, users who are not in the `nix-users` group cannot connect to +the Unix domain socket `/nix/var/nix/daemon-socket/socket`, so they +cannot perform Nix operations. diff --git a/doc/manual/source/installation/single-user.md b/doc/manual/source/installation/single-user.md deleted file mode 100644 index f9a3b26edf41..000000000000 --- a/doc/manual/source/installation/single-user.md +++ /dev/null @@ -1,9 +0,0 @@ -# Single-User Mode - -In single-user mode, all Nix operations that access the database in -`prefix/var/nix/db` or modify the Nix store in `prefix/store` must be -performed under the user ID that owns those directories. This is -typically root. (If you install from RPM packages, that’s in fact the -default ownership.) However, on single-user machines, it is often -convenient to `chown` those directories to your normal user account so -that you don’t have to `su` to root all the time. diff --git a/doc/manual/source/installation/supported-platforms.md b/doc/manual/source/installation/supported-platforms.md deleted file mode 100644 index 8ca3ce8d445e..000000000000 --- a/doc/manual/source/installation/supported-platforms.md +++ /dev/null @@ -1,7 +0,0 @@ -# Supported Platforms - -Nix is currently supported on the following platforms: - - - Linux (i686, x86\_64, aarch64). - - - macOS (x86\_64, aarch64). 
diff --git a/doc/manual/source/installation/uninstall.md b/doc/manual/source/installation/uninstall.md index 4bb1c0e6ef48..e95634c213a1 100644 --- a/doc/manual/source/installation/uninstall.md +++ b/doc/manual/source/installation/uninstall.md @@ -1,196 +1,15 @@ # Uninstalling Nix -## Multi User +To uninstall Determinate Nix, use the uninstallation utility built into the [Determinate Nix Installer][installer]: -Removing a [multi-user installation](./installing-binary.md#multi-user-installation) depends on the operating system. - -### Linux - -If you are on Linux with systemd: - -1. Remove the Nix daemon service: - - ```console - sudo systemctl stop nix-daemon.service - sudo systemctl disable nix-daemon.socket nix-daemon.service - sudo systemctl daemon-reload - ``` - -2. Remove files created by Nix: - - ```console - sudo rm -rf /etc/nix /etc/profile.d/nix.sh /etc/tmpfiles.d/nix-daemon.conf /nix ~/.local/share/nix ~/.local/state/nix ~/.cache/nix ~/.nix-defexpr ~/.nix-profile ~/.nix-channels ~root/.nix-channels ~root/.nix-defexpr ~root/.nix-profile ~root/.cache/nix - ``` - -3. Remove build users and their group: - - ```console - for i in $(seq 1 32); do - sudo userdel nixbld$i - done - sudo groupdel nixbld - ``` - -4. There may also be references to Nix in - - `/etc/bash.bashrc` - - `/etc/bashrc` - - `/etc/profile` - - `/etc/zsh/zshrc` - - `/etc/zshrc` - - which you may remove. - -### FreeBSD - -1. Stop and remove the Nix daemon service: - - ```console - sudo service nix-daemon stop - sudo rm -f /usr/local/etc/rc.d/nix-daemon - sudo sysrc -x nix_daemon_enable - ``` - -2. Remove files created by Nix: - - ```console - sudo rm -rf /etc/nix /usr/local/etc/profile.d/nix.sh /nix ~/.local/share/nix ~/.local/state/nix ~/.cache/nix ~/.nix-defexpr ~/.nix-profile ~/.nix-channels ~root/.nix-channels ~root/.nix-defexpr ~root/.nix-profile ~root/.cache/nix - ``` - -3. 
Remove build users and their group: - - ```console - for i in $(seq 1 32); do - sudo pw userdel nixbld$i - done - sudo pw groupdel nixbld - ``` - -4. There may also be references to Nix in: - - `/usr/local/etc/bashrc` - - `/usr/local/etc/zshrc` - - Shell configuration files in users' home directories - - which you may remove. - -### macOS - -> **Updating to macOS 15 Sequoia** -> -> If you recently updated to macOS 15 Sequoia and are getting -> ```console -> error: the user '_nixbld1' in the group 'nixbld' does not exist -> ``` -> when running Nix commands, refer to GitHub issue [NixOS/nix#10892](https://github.com/NixOS/nix/issues/10892) for instructions to fix your installation without reinstalling. - -1. If system-wide shell initialisation files haven't been altered since installing Nix, use the backups made by the installer: - - ```console - sudo mv /etc/zshrc.backup-before-nix /etc/zshrc - sudo mv /etc/bashrc.backup-before-nix /etc/bashrc - sudo mv /etc/bash.bashrc.backup-before-nix /etc/bash.bashrc - ``` - - Otherwise, edit `/etc/zshrc`, `/etc/bashrc`, and `/etc/bash.bashrc` to remove the lines sourcing `nix-daemon.sh`, which should look like this: - - ```bash - # Nix - if [ -e '/nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh' ]; then - . '/nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh' - fi - # End Nix - ``` - -2. Stop and remove the Nix daemon services: - - ```console - sudo launchctl unload /Library/LaunchDaemons/org.nixos.nix-daemon.plist - sudo rm /Library/LaunchDaemons/org.nixos.nix-daemon.plist - sudo launchctl unload /Library/LaunchDaemons/org.nixos.darwin-store.plist - sudo rm /Library/LaunchDaemons/org.nixos.darwin-store.plist - ``` - - This stops the Nix daemon and prevents it from being started next time you boot the system. - -3. Remove the `nixbld` group and the `_nixbuildN` users: - - ```console - sudo dscl . -delete /Groups/nixbld - for u in $(sudo dscl . -list /Users | grep _nixbld); do sudo dscl . 
-delete /Users/$u; done - ``` - - This will remove all the build users that no longer serve a purpose. - -4. Edit fstab using `sudo vifs` to remove the line mounting the Nix Store volume on `/nix`, which looks like - - ``` - UUID= /nix apfs rw,noauto,nobrowse,suid,owners - ``` - or - - ``` - LABEL=Nix\040Store /nix apfs rw,nobrowse - ``` - - by setting the cursor on the respective line using the arrow keys, and pressing `dd`, and then `:wq` to save the file. - - This will prevent automatic mounting of the Nix Store volume. - -5. Edit `/etc/synthetic.conf` to remove the `nix` line. - If this is the only line in the file you can remove it entirely: - - ```bash - if [ -f /etc/synthetic.conf ]; then - if [ "$(cat /etc/synthetic.conf)" = "nix" ]; then - sudo rm /etc/synthetic.conf - else - sudo vi /etc/synthetic.conf - fi - fi - ``` - - This will prevent the creation of the empty `/nix` directory. - -6. Remove the files Nix added to your system, except for the store: - - ```console - sudo rm -rf /etc/nix /var/root/.nix-profile /var/root/.nix-defexpr /var/root/.nix-channels ~/.nix-profile ~/.nix-defexpr ~/.nix-channels ~/.local/share/nix ~/.local/state/nix ~/.cache/nix - ``` - - -7. Remove the Nix Store volume: - - ```console - sudo diskutil apfs deleteVolume /nix - ``` - - This will remove the Nix Store volume and everything that was added to the store. - - If the output indicates that the command couldn't remove the volume, you should make sure you don't have an _unmounted_ Nix Store volume. - Look for a "Nix Store" volume in the output of the following command: - - ```console - diskutil list - ``` - - If you _do_ find a "Nix Store" volume, delete it by running `diskutil apfs deleteVolume` with the store volume's `diskXsY` identifier. - - If you get an error that the volume is in use by the kernel, reboot and immediately delete the volume before starting any other process. 
- -> **Note** -> -> After you complete the steps here, you will still have an empty `/nix` directory. -> This is an expected sign of a successful uninstall. -> The empty `/nix` directory will disappear the next time you reboot. -> -> You do not have to reboot to finish uninstalling Nix. -> The uninstall is complete. -> macOS (Catalina+) directly controls root directories, and its read-only root will prevent you from manually deleting the empty `/nix` mountpoint. - -## Single User +```console +$ /nix/nix-installer uninstall +``` -To remove a [single-user installation](./installing-binary.md#single-user-installation) of Nix, run: +If you're certain that you want to uninstall, you can skip the confirmation step: ```console -rm -rf /nix ~/.nix-channels ~/.nix-defexpr ~/.nix-profile ~/.local/share/nix ~/.local/state/nix ~/.cache/nix +$ /nix/nix-installer uninstall --no-confirm ``` -You might also want to manually remove references to Nix from your `~/.profile`. + +[installer]: https://github.com/DeterminateSystems/nix-installer diff --git a/doc/manual/source/installation/upgrading.md b/doc/manual/source/installation/upgrading.md index a433f1d30e6c..8fe342b09b7c 100644 --- a/doc/manual/source/installation/upgrading.md +++ b/doc/manual/source/installation/upgrading.md @@ -1,40 +1,10 @@ # Upgrading Nix -> **Note** -> -> These upgrade instructions apply where Nix was installed following the [installation instructions in this manual](./index.md). 
- -Check which Nix version will be installed, for example from one of the [release channels](http://channels.nixos.org/) such as `nixpkgs-unstable`: - -```console -$ nix-shell -p nix -I nixpkgs=channel:nixpkgs-unstable --run "nix --version" -nix (Nix) 2.18.1 -``` - -> **Warning** -> -> Writing to the [local store](@docroot@/store/types/local-store.md) with a newer version of Nix, for example by building derivations with [`nix-build`](@docroot@/command-ref/nix-build.md) or [`nix-store --realise`](@docroot@/command-ref/nix-store/realise.md), may change the database schema! -> Reverting to an older version of Nix may therefore require purging the store database before it can be used. - -## Linux multi-user +You can upgrade Determinate Nix using Determinate Nixd: ```console -$ sudo su -# nix-env --install --file '' --attr nix cacert -I nixpkgs=channel:nixpkgs-unstable -# systemctl daemon-reload -# systemctl restart nix-daemon +sudo determinate-nixd upgrade ``` -## macOS multi-user +Note that the `sudo` is necessary here and upgrading fails without it. -```console -$ sudo nix-env --install --file '' --attr nix cacert -I nixpkgs=channel:nixpkgs-unstable -$ sudo launchctl remove org.nixos.nix-daemon -$ sudo launchctl load /Library/LaunchDaemons/org.nixos.nix-daemon.plist -``` - -## Single-user all platforms - -```console -$ nix-env --install --file '' --attr nix cacert -I nixpkgs=channel:nixpkgs-unstable -``` diff --git a/doc/manual/source/introduction.md b/doc/manual/source/introduction.md index 85de7982c917..039ad6f30b1c 100644 --- a/doc/manual/source/introduction.md +++ b/doc/manual/source/introduction.md @@ -1,4 +1,19 @@ -# Introduction +# Determinate Nix + +**Determinate Nix** is a downstream distribution of [Nix], a purely functional language, CLI tool, and package management system. +It's available on Linux, macOS, and Windows Subsystem for Linux (WSL). 
+ +## Installing + +We recommend that macOS users install Determinate Nix using our graphical installer, [Determinate.pkg][pkg]. +For Linux and Windows Subsystem for Linux (WSL) users: + +```console +curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | \ + sh -s -- install +``` + +## How Nix works Nix is a _purely functional package manager_. This means that it treats packages like values in a purely functional programming language @@ -184,10 +199,14 @@ to build configuration files in `/etc`). This means, among other things, that it is easy to roll back the entire configuration of the system to an earlier state. Also, users can install software without root privileges. For more information and downloads, see the [NixOS -homepage](https://nixos.org/). +homepage][nix]. ## License Nix is released under the terms of the [GNU LGPLv2.1 or (at your option) any later -version](http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html). +version][license]. + +[license]: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html +[pkg]: https://install.determinate.systems/determinate-pkg/stable/Universal +[nix]: https://nixos.org diff --git a/doc/manual/source/protocols/flake-schemas.md b/doc/manual/source/protocols/flake-schemas.md new file mode 100644 index 000000000000..fa8a9a63ef1b --- /dev/null +++ b/doc/manual/source/protocols/flake-schemas.md @@ -0,0 +1,65 @@ +# Flake Schemas + +Flake schemas are a mechanism to allow tools like `nix flake show` and `nix flake check` to enumerate and check the contents of a flake +in a generic way, without requiring built-in knowledge of specific flake output types like `packages` or `nixosConfigurations`. + +A flake can define schemas for its outputs by defining a `schemas` output. `schemas` should be an attribute set with an attribute for +every output type that you want to be supported. 
If a flake does not have a `schemas` attribute, Nix uses a built-in set of schemas (namely https://github.com/DeterminateSystems/flake-schemas). + +A schema is an attribute set with the following attributes: + +| Attribute | Description | Default | +| :---------- | :---------------------------------------------------------------------------------------------- | :------ | +| `version` | Should be set to 1 | | +| `doc` | A string containing documentation about the flake output type in Markdown format. | | +| `allowIFD` | Whether the evaluation of the output attributes of this flake can read from derivation outputs. | `true` | +| `inventory` | A function that returns the contents of the flake output (described [below](#inventory)). | | + +# Inventory + +The `inventory` function returns a _node_ describing the contents of the flake output. A node is either a _leaf node_ or a _non-leaf node_. This allows nested flake output attributes to be described (e.g. `x86_64-linux.hello` inside a `packages` output). + +Non-leaf nodes must have the following attribute: + +| Attribute | Description | +| :--------- | :------------------------------------------------------------------------------------- | +| `children` | An attribute set of nodes. If this attribute is missing, the attribute is a leaf node. | + +Leaf nodes can have the following attributes: + +| Attribute | Description | +| :------------------- | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `derivationAttrPath` | If not null, a list of strings denoting the attribute path of the "main" derivation of this node. | +| `evalChecks` | An attribute set of Boolean values, used by `nix flake check`. Each attribute must evaluate to `true`. | +| `isFlakeCheck` | Whether `nix flake check` should build the attribute denoted by `derivationAttrPath`. 
| +| `shortDescription` | A one-sentence description of the node (such as the `meta.description` attribute in Nixpkgs). | +| `what` | A brief human-readable string describing the type of the node, e.g. `"package"` or `"development environment"`. This is used by tools like `nix flake show` to describe the contents of a flake. | + +Both leaf and non-leaf nodes can have the following attributes: + +| Attribute | Description | +| :----------- | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `forSystems` | A list of Nix system types (e.g. `["x86_64-linux"]`) supported by this node. This is used by tools to skip nodes that cannot be built on the user's system. Setting this on a non-leaf node allows all the children to be skipped, regardless of the `forSystems` attributes of the children. If this attribute is not set, the node is never skipped. | +| `isLegacy` | If set to true, this node is skipped unless the `--legacy` CLI flag is set. | + +# Example + +Here is a schema that checks that every element of the `nixosConfigurations` flake output evaluates and builds correctly (meaning that it has a `config.system.build.toplevel` attribute that yields a buildable derivation). + +```nix +outputs = { + schemas.nixosConfigurations = { + version = 1; + doc = '' + The `nixosConfigurations` flake output defines NixOS system configurations. 
+ ''; + inventory = output: { + children = builtins.mapAttrs (configName: machine: + { + what = "NixOS configuration"; + derivationAttrPath = [ "config" "system" "build" "toplevel" ]; + }) output; + }; + }; +}; +``` diff --git a/doc/manual/source/protocols/json/schema/derivation-options-v1.yaml b/doc/manual/source/protocols/json/schema/derivation-options-v1.yaml index 58ff070882ff..d247802cd6c2 100644 --- a/doc/manual/source/protocols/json/schema/derivation-options-v1.yaml +++ b/doc/manual/source/protocols/json/schema/derivation-options-v1.yaml @@ -9,7 +9,7 @@ description: | > **Warning** > > This JSON format is currently - > [**experimental**](@docroot@/development/experimental-features.md#xp-feature-nix-command) + > [**experimental**](@docroot@/development/experimental-features.md) > and subject to change. type: object diff --git a/doc/manual/source/protocols/json/schema/derivation-v4.yaml b/doc/manual/source/protocols/json/schema/derivation-v4.yaml index c41eef31bfc6..c1884769671e 100644 --- a/doc/manual/source/protocols/json/schema/derivation-v4.yaml +++ b/doc/manual/source/protocols/json/schema/derivation-v4.yaml @@ -9,7 +9,7 @@ description: | > **Warning** > > This JSON format is currently - > [**experimental**](@docroot@/development/experimental-features.md#xp-feature-nix-command) + > [**experimental**](@docroot@/development/experimental-features.md) > and subject to change. type: object diff --git a/doc/manual/source/protocols/json/schema/store-object-info-v2.yaml b/doc/manual/source/protocols/json/schema/store-object-info-v2.yaml index 3ed7e99e28d8..582b5e9eb476 100644 --- a/doc/manual/source/protocols/json/schema/store-object-info-v2.yaml +++ b/doc/manual/source/protocols/json/schema/store-object-info-v2.yaml @@ -6,12 +6,6 @@ description: | This schema describes the JSON representation of store object metadata as returned by commands like [`nix path-info --json`](@docroot@/command-ref/new-cli/nix3-path-info.md). 
- > **Warning** - > - > This JSON format is currently - > [**experimental**](@docroot@/development/experimental-features.md#xp-feature-nix-command) - > and subject to change. - ### Field Categories Store object information can come in a few different variations. @@ -185,6 +179,15 @@ $defs: The total size of this store object and every other object in its [closure](@docroot@/glossary.md#gloss-closure). > This field is not stored at all, but computed by traversing the other fields across all the store objects in a closure. + + provenance: + oneOf: + - type: "null" + - type: object # FIXME + title: Provenance + description: | + An arbitrary JSON object containing provenance information about the store object, or `null` if not available. + additionalProperties: false narInfo: @@ -268,4 +271,13 @@ $defs: > This is an impure "`.narinfo`" field that may not be included in certain contexts. > This field is not stored at all, but computed by traversing the other fields across all the store objects in a closure. + + provenance: + oneOf: + - type: "null" + - type: object # FIXME + title: Provenance + description: | + An arbitrary JSON object containing provenance information about the store object, or `null` if not available. + additionalProperties: false diff --git a/doc/manual/source/protocols/json/schema/store-path-v1.yaml b/doc/manual/source/protocols/json/schema/store-path-v1.yaml index f1f58c2bf1ac..3cd7c56fcf79 100644 --- a/doc/manual/source/protocols/json/schema/store-path-v1.yaml +++ b/doc/manual/source/protocols/json/schema/store-path-v1.yaml @@ -6,12 +6,6 @@ description: | This schema describes the JSON representation of store paths as used in various Nix JSON APIs. - > **Warning** - > - > This JSON format is currently - > [**experimental**](@docroot@/development/experimental-features.md#xp-feature-nix-command) - > and subject to change. 
- ## Format Store paths in JSON are represented as strings containing just the hash and name portion, without the store directory prefix. diff --git a/doc/manual/source/protocols/json/schema/store-v1.yaml b/doc/manual/source/protocols/json/schema/store-v1.yaml index ebe61d9cb227..e3e09c699621 100644 --- a/doc/manual/source/protocols/json/schema/store-v1.yaml +++ b/doc/manual/source/protocols/json/schema/store-v1.yaml @@ -10,7 +10,7 @@ description: | > **Warning** > > This JSON format is currently - > [**experimental**](@docroot@/development/experimental-features.md#xp-feature-nix-command) + > [**experimental**](@docroot@/development/experimental-features.md) > and subject to change. type: object diff --git a/doc/manual/source/protocols/wasm.md b/doc/manual/source/protocols/wasm.md new file mode 100644 index 000000000000..ca67491cadd5 --- /dev/null +++ b/doc/manual/source/protocols/wasm.md @@ -0,0 +1,379 @@ +# Wasm Host Interface + +Nix provides a builtin for calling WebAssembly modules: `builtins.wasm`. This allows extending Nix with custom functionality written in languages that compile to WebAssembly (such as Rust). + +## Overview + +WebAssembly modules can interact with Nix values through a host interface that provides functions for creating and inspecting Nix values. The WASM module receives Nix values as opaque `ValueId` handles and uses host functions to work with them. + +The `builtins.wasm` builtin takes two arguments: +1. A configuration attribute set with the following attributes: + - `path` - Path to the WebAssembly module (required) + - `function` - Name of the Wasm function to call (required for non-WASI modules, not allowed for WASI modules) +2. The argument value to pass to the function + +WASI mode is automatically detected by checking if the module imports from `wasi_snapshot_preview1`. There are two calling conventions: + +- **Non-WASI mode** (no WASI imports) calls the Wasm export specified by `function` directly. 
The function receives its input as a `ValueId` parameter and returns a `ValueId`. +- **WASI mode** (when the module imports from `wasi_snapshot_preview1`) runs the WASI module's `_start` entry point. The input `ValueId` is passed as a command-line argument (`argv[1]`), and the result is returned by calling the `return_to_nix` host function. + +## Value IDs + +Nix values are represented in Wasm code as a `u32` referred to below as a `ValueId`. These are opaque handles that reference values managed by the Nix evaluator. Value ID 0 is reserved to represent a missing attribute lookup result. + +## Entry Points + +### Non-WASI Mode + +Non-WASI mode is used when the module does **not** import from `wasi_snapshot_preview1`. + +Usage: +```nix +builtins.wasm { + path = ; + function = ; +} +``` + +Every Wasm module used in non-WASI mode must export: +- A `memory` object that the host can use to read/write data. +- `nix_wasm_init_v1()`, a function that is called once when the module is instantiated. +- The entry point function, whose name is specified by the `function` attribute. It takes a single `ValueId` and returns a single `ValueId` (i.e. it has type `fn(arg: u32) -> u32`). + +### WASI Mode + +WASI mode is automatically used when the module imports a `wasi_snapshot_preview1` function. + +Usage: +```nix +builtins.wasm { + path = ; +} +``` + +Every WASI module must export: +- A `memory` object that the host can use to read/write data. +- `_start()`, the standard WASI entry point. This function takes no parameters. + +The input value is passed as a command-line argument: `argv[1]` is set to the decimal representation of the `ValueId` of the input value. + +To return a result to Nix, the module must call the `return_to_nix` host function (see below) with the `ValueId` of the result. If `_start` finishes without calling `return_to_nix`, an error is raised. 
+ +Standard output and standard error from the WASI module are captured and emitted as Nix warnings (one warning per line). + +## Host Functions + +All host functions are imported from the `env` module. + +### Error Handling + +#### `panic(ptr: u32, len: u32)` + +Aborts execution with an error message. + +**Parameters:** +- `ptr` - Pointer to UTF-8 encoded error message in Wasm memory +- `len` - Length of the error message in bytes + +#### `warn(ptr: u32, len: u32)` + +Emits a warning message. + +**Parameters:** +- `ptr` - Pointer to UTF-8 encoded warning message in Wasm memory +- `len` - Length of the warning message in bytes + +### Type Inspection + +#### `get_type(value: ValueId) -> u32` + +Returns the type of a Nix value. + +**Parameters:** +- `value` - ID of a Nix value + +**Return values:** +- `1` - Integer +- `2` - Float +- `3` - Boolean +- `4` - String +- `5` - Path +- `6` - Null +- `7` - Attribute set +- `8` - List +- `9` - Function + +**Note:** Forces evaluation of the value. + +### Integer Operations + +#### `make_int(n: i64) -> ValueId` + +Creates a Nix integer value. + +**Parameters:** +- `n` - The integer value + +**Returns:** Value ID of the created integer + +#### `get_int(value: ValueId) -> i64` + +Extracts an integer from a Nix value. Throws an error if the value is not an integer. + +**Parameters:** +- `value` - ID of a Nix integer value + +**Returns:** The integer value + +### Float Operations + +#### `make_float(x: f64) -> ValueId` + +Creates a Nix float value. + +**Parameters:** +- `x` - The float value + +**Returns:** Value ID of the created float + +#### `get_float(value: ValueId) -> f64` + +Extracts a float from a Nix value. Throws an error if the value is not a float. + +**Parameters:** +- `value` - ID of a Nix float value + +**Returns:** The float value + +### Boolean Operations + +#### `make_bool(b: i32) -> ValueId` + +Creates a Nix Boolean value. 
+ +**Parameters:** +- `b` - Boolean value (0 = false, non-zero = true) + +**Returns:** Value ID of the created Boolean + +#### `get_bool(value: ValueId) -> i32` + +Extracts a Boolean from a Nix value. Throws an error if the value is not a Boolean. + +**Parameters:** +- `value` - ID of a Nix Boolean value + +**Returns:** 0 for false, 1 for true + +### Null Operations + +#### `make_null() -> ValueId` + +Creates a Nix null value. + +**Returns:** Value ID of the null value + +### String Operations + +#### `make_string(ptr: u32, len: u32) -> ValueId` + +Creates a Nix string value from Wasm memory. + +**Parameters:** +- `ptr` - Pointer to a string in Wasm memory +- `len` - Length of the string in bytes + +**Note:** Strings do not require a null terminator. + +**Returns:** Value ID of the created string + +#### `copy_string(value: ValueId, ptr: u32, max_len: u32) -> u32` + +Copies a Nix string value into Wasm memory. + +**Parameters:** +- `value` - ID of a string value +- `ptr` - Pointer to buffer in Wasm memory +- `max_len` - Maximum number of bytes to copy + +**Returns:** The actual length of the string in bytes + +**Note:** If the returned length is greater than `max_len`, no data is copied. Call again with a larger buffer to get the full string. + +### Path Operations + +#### `make_path(base: ValueId, ptr: u32, len: u32) -> ValueId` + +Creates a Nix path value relative to a base path. + +**Parameters:** +- `base` - ID of a path value +- `ptr` - Pointer to a string in Wasm memory +- `len` - Length of the path string in bytes + +**Returns:** ID of a new path value + +**Note:** The path string is interpreted relative to the base path. The resulting path is in the same source tree ("source accessor") as the original path. + +#### `copy_path(value: ValueId, ptr: u32, max_len: u32) -> u32` + +Copies a Nix path value into Wasm memory as an absolute path string. 
+ +**Parameters:** +- `value` - ID of a path value +- `ptr` - Pointer to buffer in Wasm memory +- `max_len` - Maximum number of bytes to copy + +**Returns:** The actual length of the path string in bytes + +**Note:** If the returned length is greater than `max_len`, no data is copied. + +### List Operations + +#### `make_list(ptr: u32, len: u32) -> ValueId` + +Creates a Nix list from an array of value IDs in Wasm memory. + +**Parameters:** +- `ptr` - Pointer to array of `ValueId` (u32) in Wasm memory +- `len` - Number of elements in the array + +**Returns:** Value ID of the created list + +**Note:** The array must contain `len * 4` bytes (each ValueId is 4 bytes). + +#### `copy_list(value: ValueId, ptr: u32, max_len: u32) -> u32` + +Copies a Nix list into Wasm memory as an array of value IDs. + +**Parameters:** +- `value` - ID of a list value +- `ptr` - Pointer to buffer in Wasm memory +- `max_len` - Maximum number of elements to copy + +**Returns:** The actual number of elements in the list + +**Note:** If the returned length is greater than `max_len`, no data is copied. Each element is written as a `ValueId` (4 bytes). The buffer must be `max_len * 4` bytes large. + +### Attribute Set Operations + +#### `make_attrset(ptr: u32, len: u32) -> ValueId` + +Creates a Nix attribute set from an array of attributes in Wasm memory. + +**Parameters:** +- `ptr` - Pointer to array of attribute structures in Wasm memory +- `len` - Number of attributes + +**Returns:** Value ID of the created attribute set + +**Attribute structure format:** +```c +struct Attr { + name_ptr: u32, // Pointer to attribute name + name_len: u32, // Length of attribute name in bytes + value_id: u32, // ID of the attribute value +} +``` + +Each `Attr` element is 12 bytes (3 × 4 bytes). + +#### `copy_attrset(value: ValueId, ptr: u32, max_len: u32) -> u32` + +Copies a Nix attribute set into Wasm memory as an array of attribute structures. 
+ +**Parameters:** +- `value` - ID of a Nix attribute set value +- `ptr` - Pointer to buffer in Wasm memory +- `max_len` - Maximum number of attributes to copy + +**Returns:** The actual number of attributes in the set + +**Note:** If the returned length is greater than `max_len`, no data is copied. + +**Output structure format:** +```c +struct Attr { + value_id: u32, // ID of the attribute value + name_len: u32, // Length of attribute name in bytes +} +``` + +Each attribute is 8 bytes (2 × 4 bytes). Use `copy_attrname` to retrieve attribute names. + +#### `copy_attrname(value: ValueId, attr_idx: u32, ptr: u32, len: u32)` + +Copies an attribute name into Wasm memory. + +**Parameters:** +- `value` - ID of a Nix attribute set value +- `attr_idx` - Index of the attribute (from `copy_attrset`) +- `ptr` - Pointer to buffer in Wasm memory +- `len` - Length of the buffer (must exactly match the attribute name length) + +**Note:** Throws an error if `len` doesn't match the attribute name length or if `attr_idx` is out of bounds. + +#### `get_attr(value: ValueId, ptr: u32, len: u32) -> ValueId` + +Gets an attribute value from an attribute set by name. + +**Parameters:** +- `value` - ID of a Nix attribute set value +- `ptr` - Pointer to the attribute name in Wasm memory +- `len` - Length of the attribute name in bytes + +**Returns:** Value ID of the attribute value, or 0 if the attribute doesn't exist + +### Function Operations + +#### `call_function(fun: ValueId, ptr: u32, len: u32) -> ValueId` + +Calls a Nix function with arguments. + +**Parameters:** +- `fun` - ID of a Nix function value +- `ptr` - Pointer to array of `ValueId` arguments in Wasm memory +- `len` - Number of arguments + +**Returns:** Value ID of the function result + +#### `make_app(fun: ValueId, ptr: u32, len: u32) -> ValueId` + +Creates a lazy or partially applied function application. 
+ +**Parameters:** +- `fun` - ID of a Nix function value +- `ptr` - Pointer to array of `ValueId` arguments in Wasm memory +- `len` - Number of arguments + +**Returns:** Value ID of the unevaluated application + +### Returning Results (WASI mode only) + +#### `return_to_nix(value: ValueId)` + +Returns a result value to the Nix evaluator from a WASI module. This function is only available in WASI mode. + +**Parameters:** +- `value` - ID of the Nix value to return as the result of the `builtins.wasm` call + +**Note:** Calling this function immediately terminates the WASI module's execution. The module must call `return_to_nix` before finishing; otherwise, an error is raised. + +### File I/O + +#### `read_file(path: ValueId, ptr: u32, len: u32) -> u32` + +Reads a file into Wasm memory. + +**Parameters:** +- `path` - Value ID of a Nix path value +- `ptr` - Pointer to buffer in Wasm memory +- `len` - Maximum number of bytes to read + +**Returns:** The actual file size in bytes + +**Note:** Similar to `builtins.readFile`, but can handle files that cannot be represented as Nix strings (in particular, files containing NUL bytes). If the returned size is greater than `len`, no data is copied. + +## Example Usage + +For Rust bindings to this interface and several examples, see https://github.com/DeterminateSystems/nix-wasm-rust/. diff --git a/doc/manual/source/quick-start.md b/doc/manual/source/quick-start.md index 9eb7a3265903..42e4e9c0c247 100644 --- a/doc/manual/source/quick-start.md +++ b/doc/manual/source/quick-start.md @@ -3,10 +3,13 @@ This chapter is for impatient people who don't like reading documentation. For more in-depth information you are kindly referred to subsequent chapters. -1. Install Nix: +1. Install Nix. + We recommend that macOS users install Determinate Nix using our graphical installer, [Determinate.pkg][pkg]. 
+ For Linux and Windows Subsystem for Linux (WSL) users: ```console - $ curl -L https://nixos.org/nix/install | sh + $ curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | \ + sh -s -- install ``` The install script will use `sudo`, so make sure you have sufficient rights. @@ -41,3 +44,5 @@ For more in-depth information you are kindly referred to subsequent chapters. ```console $ nix-collect-garbage ``` + +[pkg]: https://install.determinate.systems/determinate-pkg/stable/Universal diff --git a/doc/manual/source/release-notes-determinate/changes.md b/doc/manual/source/release-notes-determinate/changes.md new file mode 100644 index 000000000000..5f795f31d5d8 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/changes.md @@ -0,0 +1,188 @@ +# Changes between Nix and Determinate Nix + +This section lists the differences between upstream Nix 2.33 and Determinate Nix 3.18.1. + +* In Determinate Nix, flakes are stable. You no longer need to enable the `flakes` experimental feature. + +* In Determinate Nix, the new Nix CLI (i.e. the `nix` command) is stable. You no longer need to enable the `nix-command` experimental feature. + +* Determinate Nix has a setting [`json-log-path`](@docroot@/command-ref/conf-file.md#conf-json-log-path) to send a copy of all Nix log messages (in JSON format) to a file or Unix domain socket. + +* Determinate Nix has made `nix profile install` an alias to `nix profile add`, a more symmetrical antonym of `nix profile remove`. + +* `nix-channel` and `channel:` url syntax (like `channel:nixos-24.11`) is deprecated, see: https://github.com/DeterminateSystems/nix-src/issues/34 + +* Using indirect flake references and implicit inputs is deprecated, see: https://github.com/DeterminateSystems/nix-src/issues/37 + +* Warnings around "dirty trees" are updated to reduce "dirty" jargon, and now refers to "uncommitted changes". 
+ + + + + + + +* `nix upgrade-nix` is now inert, and suggests using `determinate-nixd upgrade`. [DeterminateSystems/nix-src#55](https://github.com/DeterminateSystems/nix-src/pull/55) + +* Determinate Nix has Lazy Trees, avoiding expensive copying of flake inputs to the Nix store. ([DeterminateSystems/nix-src#27](https://github.com/DeterminateSystems/nix-src/pull/27), [DeterminateSystems/nix-src#56](https://github.com/DeterminateSystems/nix-src/pull/56)) + + + + + + + + + +* Documentation on how to replicate `nix-store --query --deriver` with the new `nix` cli. [DeterminateSystems/nix-src#82](https://github.com/DeterminateSystems/nix-src/pull/82) + +* In `nix profile`, the symbols `ε` and `∅` have been replaced with descriptive English words. [DeterminateSystems/nix-src#81](https://github.com/DeterminateSystems/nix-src/pull/81) + + + + + + + +* When remote building with `--keep-failed`, Determinate Nix shows "you can rerun" message if the derivation's platform is supported on this machine. [DeterminateSystems/nix-src#87](https://github.com/DeterminateSystems/nix-src/pull/87) + +* Improved error message when `sandbox-paths` specifies a missing file. [DeterminateSystems/nix-src#88](https://github.com/DeterminateSystems/nix-src/pull/88) + + + + + + + + + +* `nix store delete` now explains why deletion fails. [DeterminateSystems/nix-src#130](https://github.com/DeterminateSystems/nix-src/pull/130) + + + + + + + + + + + + + +* Tab completing arguments to Nix avoids network access. [DeterminateSystems/nix-src#161](https://github.com/DeterminateSystems/nix-src/pull/161) + +* Importing Nixpkgs and other tarballs to the cache is 2-4x faster. [DeterminateSystems/nix-src#149](https://github.com/DeterminateSystems/nix-src/pull/149) + +* Adding paths to the store is significantly faster. [DeterminateSystems/nix-src#162](https://github.com/DeterminateSystems/nix-src/pull/162) + + + + + +* Determinate Nix allows flake inputs to be fetched at build time. 
[DeterminateSystems/nix-src#49](https://github.com/DeterminateSystems/nix-src/pull/49) + + +* The default `nix flake init` template is much more useful. [DeterminateSystems/nix-src#180](https://github.com/DeterminateSystems/nix-src/pull/180) + + + + + + + + +* Multithreaded evaluation support. [DeterminateSystems/nix-src#125](https://github.com/DeterminateSystems/nix-src/pull/125) + + + + + + +* Determinate Nix only tries to substitute inputs if fetching from its original location fails. [DeterminateSystems/nix-src#202](https://github.com/DeterminateSystems/nix-src/pull/202) + + + + + + +* A new command `nix nario` that replaces `nix-store --export|--import`. It also has a new file format (`--format 2`) that supports store path attributes such as signatures, and that can be imported more efficiently. [DeterminateSystems/nix-src#215](https://github.com/DeterminateSystems/nix-src/pull/215) + +* Determinate Nix prints the Nix version when using `-vv` or higher verbosity. [DeterminateSystems/nix-src#237](https://github.com/DeterminateSystems/nix-src/pull/237) + + + + +* During evaluation, you can read or import from the result of `builtins.fetchClosure`. [DeterminateSystems/nix-src#241](https://github.com/DeterminateSystems/nix-src/pull/241) + + + +* Flakerefs in error messages and lockfile diffs are abbreviated for readability. [DeterminateSystems/nix-src#243](https://github.com/DeterminateSystems/nix-src/pull/243), [DeterminateSystems/nix-src#264](https://github.com/DeterminateSystems/nix-src/pull/264) + + + + + + + + +* The Git fetcher doesn't compute `revCount` or `lastModified` if they're already specified. [DeterminateSystems/nix-src#269](https://github.com/DeterminateSystems/nix-src/pull/269) + +* The Git fetcher avoids doing a shallow Git fetch if it previously did a non-shallow fetch of the same repository. 
[DeterminateSystems/nix-src#270](https://github.com/DeterminateSystems/nix-src/pull/270) + +* Determinate Nix has a builtin copy of the flake registry, making it more resilient to network outages. [DeterminateSystems/nix-src#271](https://github.com/DeterminateSystems/nix-src/pull/271) + + + +* `nix build` and `nix profile` report failing or succeeding installables. [DeterminateSystems/nix-src#281](https://github.com/DeterminateSystems/nix-src/pull/281) + +* `nix flake check` shows which outputs failed or succeeded. [DeterminateSystems/nix-src#285](https://github.com/DeterminateSystems/nix-src/pull/285) + +* Determinate Nix has a `nix ps` command to show active builds. [DeterminateSystems/nix-src#282](https://github.com/DeterminateSystems/nix-src/pull/282) + +* Determinate Nix has improved backward compatibility with lock files created by Nix < 2.20. [DeterminateSystems/nix-src#278](https://github.com/DeterminateSystems/nix-src/pull/278) + + + +* Determinate Nix has a builtin function `builtins.filterAttrs`. [DeterminateSystems/nix-src#291](https://github.com/DeterminateSystems/nix-src/pull/291) + +* `builtins.fetchTree` implicitly sets `__final = true` when a `narHash` is supplied. This allows the tree to be substituted. [DeterminateSystems/nix-src#297](https://github.com/DeterminateSystems/nix-src/pull/297) + + + + + + + +* Determinate Nix has an experimental builtin `builtins.wasm` that allows the Nix language to be extended using any language that compiles to Wasm. [DeterminateSystems/nix-src#309](https://github.com/DeterminateSystems/nix-src/pull/309) + +* `builtins.getFlake` supports path values. [DeterminateSystems/nix-src#338](https://github.com/DeterminateSystems/nix-src/pull/338) + +* Determinate Nix has support for keeping track of the provenance of store paths. [DeterminateSystems/nix-src#321](https://github.com/DeterminateSystems/nix-src/pull/321) + + + + + + + + + + + + + + + + + + + + +* Determinate Nix can upload crash info to Sentry. 
[DeterminateSystems/nix-src#418](https://github.com/DeterminateSystems/nix-src/pull/418) + +* Determinate Nix provides the pre-build hook with a JSON serialization of the derivation. [DeterminateSystems/nix-src#424](https://github.com/DeterminateSystems/nix-src/pull/424) + + + diff --git a/doc/manual/source/release-notes-determinate/index.md b/doc/manual/source/release-notes-determinate/index.md new file mode 100644 index 000000000000..bba33084424c --- /dev/null +++ b/doc/manual/source/release-notes-determinate/index.md @@ -0,0 +1,3 @@ +# Determinate Nix Release Notes + +This chapter lists the differences between Nix and Determinate Nix, as well as the release history of Determinate Nix. diff --git a/doc/manual/source/release-notes-determinate/rl-3.0.0.md b/doc/manual/source/release-notes-determinate/rl-3.0.0.md new file mode 100644 index 000000000000..d60786e9a72f --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.0.0.md @@ -0,0 +1,5 @@ +# Release 3.0.0 (2025-03-04) + +* Initial release of Determinate Nix. + +* Based on [upstream Nix 2.26.2](../release-notes/rl-2.26.md). diff --git a/doc/manual/source/release-notes-determinate/rl-3.1.0.md b/doc/manual/source/release-notes-determinate/rl-3.1.0.md new file mode 100644 index 000000000000..96b7819d08db --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.1.0.md @@ -0,0 +1,5 @@ +# Release 3.1.0 (2025-03-27) + +* Based on [upstream Nix 2.27.1](../release-notes/rl-2.27.md). + +* New setting `json-log-path` that sends a copy of all Nix log messages (in JSON format) to a file or Unix domain socket. diff --git a/doc/manual/source/release-notes-determinate/rl-3.3.0.md b/doc/manual/source/release-notes-determinate/rl-3.3.0.md new file mode 100644 index 000000000000..badf96415df0 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.3.0.md @@ -0,0 +1,5 @@ +# Release 3.3.0 (2025-04-11) + +* Based on [upstream Nix 2.28.1](../release-notes/rl-2.28.md). 
+ +* The `nix profile install` command is now an alias to `nix profile add`, a more symmetrical antonym of `nix profile remove`. diff --git a/doc/manual/source/release-notes-determinate/rl-3.4.0.md b/doc/manual/source/release-notes-determinate/rl-3.4.0.md new file mode 100644 index 000000000000..24ae03ca554f --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.4.0.md @@ -0,0 +1,50 @@ +# Release 3.4.0 (2025-04-25) + +* Based on [upstream Nix 2.28.2](../release-notes/rl-2.28.md). + +* **Warn users that `nix-channel` is deprecated.** + +This is the first change accomplishing our roadmap item of deprecating Nix channels: https://github.com/DeterminateSystems/nix-src/issues/34 + +This is due to user confusion and surprising behavior of channels, especially in the context of user vs. root channels. + +The goal of this change is to make the user experience of Nix more predictable. +In particular, these changes are to support users with lower levels of experience who are following guides that focus on channels as the mechanism of distribution. + +Users will now see this message: + +> nix-channel is deprecated in favor of flakes in Determinate Nix. For a guide on Nix flakes, see: https://zero-to-nix.com/. For details and to offer feedback on the deprecation process, see: https://github.com/DeterminateSystems/nix-src/issues/34. + + +* **Warn users that `channel:` URLs are deprecated.** + +This is the second change regarding our deprecation of Nix channels. +Using a `channel:` URL (like `channel:nixos-24.11`) will yield a warning like this: + +> Channels are deprecated in favor of flakes in Determinate Nix. Instead of 'channel:nixos-24.11', use 'https://nixos.org/channels/nixos-24.11/nixexprs.tar.xz'. For a guide on Nix flakes, see: https://zero-to-nix.com/. For details and to offer feedback on the deprecation process, see: https://github.com/DeterminateSystems/nix-src/issues/34. 
+ +* **Warn users against indirect flake references in `flake.nix` inputs** + +This is the first change accomplishing our roadmap item of deprecating implicit and indirect flake inputs: https://github.com/DeterminateSystems/nix-src/issues/37 + +The flake registry provides an important UX affordance for using Nix flakes and remote sources in command line uses. +For that reason, the registry is not being deprecated entirely and will still be used for command-line incantations, like nix run. + +This move will eliminate user confusion and surprising behavior around global and local registries during flake input resolution. + +The goal of this change is to make the user experience of Nix more predictable. +We have seen a pattern of confusion when using automatic flake inputs and local registries. +Specifically, users' flake inputs resolving and locking inconsistently depending on the configuration of the host system. + +Users will now see the following warning if their flake.nix uses an implicit or indirect Flake reference input: + +> Flake input 'nixpkgs' uses the flake registry. Using the registry in flake inputs is deprecated in Determinate Nix. To make your flake future-proof, add the following to 'xxx/flake.nix': +> +> inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-24.11"; +> +> For more information, see: https://github.com/DeterminateSystems/nix-src/issues/37 + + +### Other updates: +* Improve the "dirty tree" message. Determinate Nix will now say `Git tree '...' has uncommitted changes` instead of `Git tree '...' 
is dirty` +* Stop warning about uncommitted changes in a Git repository when using `nix develop` diff --git a/doc/manual/source/release-notes-determinate/rl-3.4.2.md b/doc/manual/source/release-notes-determinate/rl-3.4.2.md new file mode 100644 index 000000000000..8acabd4425fd --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.4.2.md @@ -0,0 +1,4 @@ +# Release 3.4.2 (2025-05-05) + +* Based on [upstream Nix 2.28.3](../release-notes/rl-2.28.md). + diff --git a/doc/manual/source/release-notes-determinate/rl-3.5.0.md b/doc/manual/source/release-notes-determinate/rl-3.5.0.md new file mode 100644 index 000000000000..d5b26b9419e7 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.5.0.md @@ -0,0 +1,4 @@ +# Release 3.5.0 (2025-05-09) + +* Based on [upstream Nix 2.28.3](../release-notes/rl-2.28.md). + diff --git a/doc/manual/source/release-notes-determinate/rl-3.5.1.md b/doc/manual/source/release-notes-determinate/rl-3.5.1.md new file mode 100644 index 000000000000..b0813ca59c90 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.5.1.md @@ -0,0 +1,57 @@ +# Release 3.5.1 (2025-05-09) + +* Based on [upstream Nix 2.28.3](../release-notes/rl-2.28.md). + +## What's Changed + +Most notably, Lazy Trees has merged in to Determinate Nix and is in Feature Preview status, but remains disabled by default. +Lazy trees massively improves performance in virtually all scenarios because it enables Nix to avoid making unnecessary copies of files into the Nix store. +In testing, we saw iteration times on Nixpkgs **drop from over 12 seconds to 3.5 seconds**. + +After upgrading to Determinate Nix 3.5.1 with `sudo determinate-nixd upgrade`, enable lazy trees by adding this to `/etc/nix/nix.custom.conf`: + +``` +lazy-trees = true +``` + +Please note that our full flake regression test suite passes with no changes with lazy trees, and please report compatibility issues. 
+ +Read [this GitHub comment](https://github.com/DeterminateSystems/nix-src/pull/27#pullrequestreview-2822153088) for further details and next steps. +We'll be publishing an update on the [Determinate Systems blog](https://determinate.systems/posts/) in the next few days with more information as well. + +Relevant PRs: +* Lazy trees v2 by @edolstra in [DeterminateSystems/nix-src#27](https://github.com/DeterminateSystems/nix-src/pull/27) +* Improve lazy trees backward compatibility by @edolstra in [DeterminateSystems/nix-src#56](https://github.com/DeterminateSystems/nix-src/pull/56) + + +### Additional changes in this release: +* Bug fix: Flake input URLs are canonicalized before checking flake.lock file staleness, avoiding needlessly regenerating flake.lock files with `dir` in URL-style flakerefs by @edolstra in [DeterminateSystems/nix-src#57](https://github.com/DeterminateSystems/nix-src/pull/57) +* `nix upgrade-nix` is deprecated in favor of `determinate-nixd upgrade`, by @gustavderdrache in [DeterminateSystems/nix-src#55](https://github.com/DeterminateSystems/nix-src/pull/55) +* UX: Improved build failure and dependency failure error messages to include needed output paths by @edolstra in [DeterminateSystems/nix-src#58](https://github.com/DeterminateSystems/nix-src/pull/58). + +Previously: + +``` +error: builder for '/nix/store/[...]-nested-failure-bottom.drv' failed with exit code 1 +error: 1 dependencies of derivation '/nix/store/[...]-nested-failure-middle.drv' failed to build +error: 1 dependencies of derivation '/nix/store/[...]-nested-failure-top.drv' failed to build +``` + +Now: + +``` +error: Cannot build '/nix/store/w37gflm9wz9dcnsgy3sfrmnlvm8qigaj-nested-failure-bottom.drv'. + Reason: builder failed with exit code 1. + Output paths: + /nix/store/yzybs8kp35dfipbzdlqcc6lxz62hax04-nested-failure-bottom +error: Cannot build '/nix/store/00gr5hlxfc03x2675w6nn3pwfrz2fr62-nested-failure-middle.drv'. + Reason: 1 dependency failed. 
+ Output paths: + /nix/store/h781j5h4bdchmb4c2lvy8qzh8733azhz-nested-failure-middle +error: Cannot build '/nix/store/8am0ng1gyx8sbzyr0yx6jd5ix3yy5szc-nested-failure-top.drv'. + Reason: 1 dependency failed. + Output paths: + /nix/store/fh12637kgvp906s9yhi9w2dc7ghfwxs1-nested-failure-top +``` + +**Full Changelog**: [v3.4.2...v3.5.1](https://github.com/DeterminateSystems/nix-src/compare/v3.4.2...v3.5.1) diff --git a/doc/manual/source/release-notes-determinate/rl-3.5.2.md b/doc/manual/source/release-notes-determinate/rl-3.5.2.md new file mode 100644 index 000000000000..bc5396c255b6 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.5.2.md @@ -0,0 +1,11 @@ +# Release 3.5.2 (2025-05-12) + +* Based on [upstream Nix 2.28.3](../release-notes/rl-2.28.md). + +## What's Changed +* Fix a regression where narHash was not added to lock files when lazy trees were disabled by @edolstra in [DeterminateSystems/nix-src#63](https://github.com/DeterminateSystems/nix-src/pull/63) + +* Tell users a source is corrupted ("cannot read file from tarball: Truncated tar archive detected while reading data"), improving over the previous 'cannot read file from tarball' error by @edolstra in [DeterminateSystems/nix-src#64](https://github.com/DeterminateSystems/nix-src/pull/64) + + +**Full Changelog**: [v3.5.1...v3.5.2](https://github.com/DeterminateSystems/nix-src/compare/v3.5.1...v3.5.2) diff --git a/doc/manual/source/release-notes-determinate/rl-3.6.0.md b/doc/manual/source/release-notes-determinate/rl-3.6.0.md new file mode 100644 index 000000000000..453ab6c301dc --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.6.0.md @@ -0,0 +1,11 @@ +# Release 3.6.0 (2025-05-22) + +* Based on [upstream Nix 2.29.0](../release-notes/rl-2.29.md). 
+ +## What's Changed +* Install 'nix profile add' manpage by @edolstra in [DeterminateSystems/nix-src#69](https://github.com/DeterminateSystems/nix-src/pull/69) +* Sync with upstream 2.29.0 by @edolstra in [DeterminateSystems/nix-src#67](https://github.com/DeterminateSystems/nix-src/pull/67) +* Emit warnings when using import-from-derivation by setting the `trace-import-from-derivation` option to `true` by @gustavderdrache in [DeterminateSystems/nix-src#70](https://github.com/DeterminateSystems/nix-src/pull/70) + + +**Full Changelog**: [v3.5.2...v3.6.0](https://github.com/DeterminateSystems/nix-src/compare/v3.5.2...v3.6.0) diff --git a/doc/manual/source/release-notes-determinate/rl-3.6.1.md b/doc/manual/source/release-notes-determinate/rl-3.6.1.md new file mode 100644 index 000000000000..12505afee278 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.6.1.md @@ -0,0 +1,9 @@ +# Release 3.6.1 (2025-05-24) + +* Based on [upstream Nix 2.29.0](../release-notes/rl-2.29.md). + +## What's Changed +* Fix nlohmann error in fromStructuredAttrs() by @edolstra in [DeterminateSystems/nix-src#73](https://github.com/DeterminateSystems/nix-src/pull/73) + + +**Full Changelog**: [v3.6.0...v3.6.1](https://github.com/DeterminateSystems/nix-src/compare/v3.6.0...v3.6.1) diff --git a/doc/manual/source/release-notes-determinate/rl-3.6.2.md b/doc/manual/source/release-notes-determinate/rl-3.6.2.md new file mode 100644 index 000000000000..882c142f00c3 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.6.2.md @@ -0,0 +1,15 @@ +# Release 3.6.2 (2025-06-02) + +* Based on [upstream Nix 2.29.0](../release-notes/rl-2.29.md). 
+ +## What's Changed +* Dramatically improve the performance of nix store copy-sigs: Use http-connections setting to control parallelism by @edolstra in [DeterminateSystems/nix-src#80](https://github.com/DeterminateSystems/nix-src/pull/80) +* Document how to replicate nix-store --query --deriver with the nix cli by @grahamc in [DeterminateSystems/nix-src#82](https://github.com/DeterminateSystems/nix-src/pull/82) +* The garbage collector no longer gives up if it encounters an undeletable file, by @edolstra in [DeterminateSystems/nix-src#83](https://github.com/DeterminateSystems/nix-src/pull/83) +* nix profile: Replace ε and ∅ with descriptive English words by @grahamc in [DeterminateSystems/nix-src#81](https://github.com/DeterminateSystems/nix-src/pull/81) +* Rework README to clarify that this distribution is our distribution, by @lucperkins in [DeterminateSystems/nix-src#84](https://github.com/DeterminateSystems/nix-src/pull/84) +* Include the source location when warning about inefficient double copies by @edolstra in [DeterminateSystems/nix-src#79](https://github.com/DeterminateSystems/nix-src/pull/79) +* Call out that `--keep-failed` with remote builders will keep the failed build directory on that builder by @cole-h in [DeterminateSystems/nix-src#85](https://github.com/DeterminateSystems/nix-src/pull/85) + + +**Full Changelog**: [v3.6.1...v3.6.2](https://github.com/DeterminateSystems/nix-src/compare/v3.6.1...v3.6.2) diff --git a/doc/manual/source/release-notes-determinate/rl-3.6.5.md b/doc/manual/source/release-notes-determinate/rl-3.6.5.md new file mode 100644 index 000000000000..8ef5be0fd0d3 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.6.5.md @@ -0,0 +1,19 @@ +# Release 3.6.5 (2025-06-12) + +* Based on [upstream Nix 2.29.0](../release-notes/rl-2.29.md). 
+ +## What's Changed +* When remote building with --keep-failed, only show "you can rerun" message if the derivation's platform is supported on this machine by @cole-h in [DeterminateSystems/nix-src#87](https://github.com/DeterminateSystems/nix-src/pull/87) +* Indicate that sandbox-paths specifies a missing file in the corresponding error message. by @cole-h in [DeterminateSystems/nix-src#88](https://github.com/DeterminateSystems/nix-src/pull/88) +* Render lazy tree paths in messages without the /nix/store/hash... prefix in substituted source trees by @edolstra in [DeterminateSystems/nix-src#91](https://github.com/DeterminateSystems/nix-src/pull/91) +* Use FlakeHub inputs by @lucperkins in [DeterminateSystems/nix-src#89](https://github.com/DeterminateSystems/nix-src/pull/89) +* Proactively cache more flake inputs and fetches by @edolstra in [DeterminateSystems/nix-src#93](https://github.com/DeterminateSystems/nix-src/pull/93) +* Fix: register extra builtins just once by @edolstra in [DeterminateSystems/nix-src#97](https://github.com/DeterminateSystems/nix-src/pull/97) +* Fix the link to `builders-use-substitutes` documentation for `builders` by @lucperkins in [DeterminateSystems/nix-src#102](https://github.com/DeterminateSystems/nix-src/pull/102) +* Improve error messages that use the hypothetical future tense of "will" by @lucperkins in [DeterminateSystems/nix-src#92](https://github.com/DeterminateSystems/nix-src/pull/92) +* Make the `nix repl` test more stable by @edolstra in [DeterminateSystems/nix-src#103](https://github.com/DeterminateSystems/nix-src/pull/103) +* Run nixpkgsLibTests against lazy trees by @edolstra in [DeterminateSystems/nix-src#100](https://github.com/DeterminateSystems/nix-src/pull/100) +* Run the Nix test suite against lazy trees by @edolstra in [DeterminateSystems/nix-src#105](https://github.com/DeterminateSystems/nix-src/pull/105) +* Improve caching of inputs by @edolstra in 
[DeterminateSystems/nix-src#98](https://github.com/DeterminateSystems/nix-src/pull/98), [DeterminateSystems/nix-src#110](https://github.com/DeterminateSystems/nix-src/pull/110), and [DeterminateSystems/nix-src#115](https://github.com/DeterminateSystems/nix-src/pull/115)
+
+**Full Changelog**: [v3.6.2...v3.6.5](https://github.com/DeterminateSystems/nix-src/compare/v3.6.2...v3.6.5)
diff --git a/doc/manual/source/release-notes-determinate/rl-3.6.6.md b/doc/manual/source/release-notes-determinate/rl-3.6.6.md
new file mode 100644
index 000000000000..bf4e3690afa1
--- /dev/null
+++ b/doc/manual/source/release-notes-determinate/rl-3.6.6.md
@@ -0,0 +1,7 @@
+# Release 3.6.6 (2025-06-17)
+
+* Based on [upstream Nix 2.29.0](../release-notes/rl-2.29.md).
+
+## What's Changed
+
+* No-op release on the nix-src side, due to a regression on nix-darwin in determinate-nixd.
diff --git a/doc/manual/source/release-notes-determinate/rl-3.6.7.md b/doc/manual/source/release-notes-determinate/rl-3.6.7.md
new file mode 100644
index 000000000000..197587f1b3a9
--- /dev/null
+++ b/doc/manual/source/release-notes-determinate/rl-3.6.7.md
@@ -0,0 +1,17 @@
+# Release 3.6.7 (2025-06-24)
+
+* Based on [upstream Nix 2.29.1](../release-notes/rl-2.29.md). 
+
+## What's Changed
+
+### Security fixes
+
+* Patched against GHSA-g948-229j-48j3
+
+### Lazy trees
+
+* Lazy trees now produce `flake.lock` files with NAR hashes unless `lazy-locks` is set to `true` by @edolstra in [DeterminateSystems/nix-src#113](https://github.com/DeterminateSystems/nix-src/pull/113)
+* Improved caching with lazy-trees when using --impure, with enhanced testing by @edolstra in [DeterminateSystems/nix-src#117](https://github.com/DeterminateSystems/nix-src/pull/117)
+
+
+**Full Changelog**: [v3.6.6...v3.6.7](https://github.com/DeterminateSystems/nix-src/compare/v3.6.6...v3.6.7)
diff --git a/doc/manual/source/release-notes-determinate/rl-3.6.8.md b/doc/manual/source/release-notes-determinate/rl-3.6.8.md
new file mode 100644
index 000000000000..c4b4b96c9e73
--- /dev/null
+++ b/doc/manual/source/release-notes-determinate/rl-3.6.8.md
@@ -0,0 +1,12 @@
+# Release 3.6.8 (2025-06-25)
+
+* Based on [upstream Nix 2.29.1](../release-notes/rl-2.29.md).
+
+## What's Changed
+* Fix fetchToStore() caching with --impure, improve testing by @edolstra in [DeterminateSystems/nix-src#117](https://github.com/DeterminateSystems/nix-src/pull/117)
+* Add lazy-locks setting by @edolstra in [DeterminateSystems/nix-src#113](https://github.com/DeterminateSystems/nix-src/pull/113)
+* Sync 2.29.1 by @edolstra in [DeterminateSystems/nix-src#124](https://github.com/DeterminateSystems/nix-src/pull/124)
+* Release v3.6.7 by @github-actions in [DeterminateSystems/nix-src#126](https://github.com/DeterminateSystems/nix-src/pull/126)
+
+
+**Full Changelog**: [v3.6.6...v3.6.8](https://github.com/DeterminateSystems/nix-src/compare/v3.6.6...v3.6.8)
diff --git a/doc/manual/source/release-notes-determinate/rl-3.7.0.md b/doc/manual/source/release-notes-determinate/rl-3.7.0.md
new file mode 100644
index 000000000000..615e858592e2
--- /dev/null
+++ b/doc/manual/source/release-notes-determinate/rl-3.7.0.md
@@ -0,0 +1,63 @@
+# Release 3.7.0 (2025-07-03)
+
+- Based on [upstream Nix 
2.29.1](../release-notes/rl-2.29.md).
+
+## What's Changed
+
+### Prefetch flake inputs in parallel
+
+By @edolstra in [DeterminateSystems/nix-src#127](https://github.com/DeterminateSystems/nix-src/pull/127)
+
+This release brings the command `nix flake prefetch-inputs`.
+
+Flake inputs are typically fetched "just in time."
+That means Nix fetches a flake input when the evaluator needs it, and not before.
+When the evaluator needs an input, evaluation is paused until the source is available.
+
+This causes a significant slow-down on projects with lots of flake inputs.
+
+The new command `nix flake prefetch-inputs` fetches all flake inputs in parallel.
+We expect running this new command before building will dramatically improve evaluation performance for most projects, especially in CI.
+Note that projects with many unused flake inputs may not benefit from this change, since the new command fetches every input whether they're used or not.
+
+### Deep flake input overrides now work as expected
+
+By @edolstra in [DeterminateSystems/nix-src#108](https://github.com/DeterminateSystems/nix-src/pull/108)
+
+An override like:
+
+```
+inputs.foo.inputs.bar.inputs.nixpkgs.follows = "nixpkgs";
+```
+
+implicitly set `inputs.foo.inputs.bar` to `flake:bar`, which led to an unexpected error like:
+
+```
+error: cannot find flake 'flake:bar' in the flake registries
+```
+
+We now no longer create a parent override (like for `foo.bar` in the example above) if it doesn't set an explicit ref or follows attribute.
+We only recursively apply its child overrides.
+
+### `nix store delete` now shows you why deletion was not possible
+
+By @edolstra in [DeterminateSystems/nix-src#130](https://github.com/DeterminateSystems/nix-src/pull/130)
+
+For example:
+
+```
+error: Cannot delete path '/nix/store/6fcrjgfjip2ww3sx51rrmmghfsf60jvi-patchelf-0.14.3'
+ because it's referenced by the GC root '/home/eelco/Dev/nix-master/build/result'. 
+ +error: Cannot delete path '/nix/store/lf3lrf8bjfn8xvr0az9q96y989sxs5r9-cowsay-3.8.4' + because it's referenced by the GC root '/proc/3600568/environ'. + +error: Cannot delete path '/nix/store/klyng5rpdkwi5kbxkncy4gjwb490dlhb-foo.drv' + because it's in use by '{nix-process:3605324}'. +``` + +### Lazy-tree improvements + +- Improved lazy-tree evaluation caching for flakes accessed with a `path` flakeref by @edolstra in [DeterminateSystems/nix-src#131](https://github.com/DeterminateSystems/nix-src/pull/131) + +**Full Changelog**: [v3.6.8...v3.7.0](https://github.com/DeterminateSystems/nix-src/compare/v3.6.8...v3.7.0) diff --git a/doc/manual/source/release-notes-determinate/rl-3.8.0.md b/doc/manual/source/release-notes-determinate/rl-3.8.0.md new file mode 100644 index 000000000000..4103d6df94e0 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.8.0.md @@ -0,0 +1,29 @@ +# Release 3.8.0 (2025-07-10) + +* Based on [upstream Nix 2.30.0](../release-notes/rl-2.30.md). + +## What's Changed + +### Faster CI with `nix flake check` + +`nix flake check` no longer downloads flake outputs if no building is necessary. + +This command is intended to validate that a flake can fully evaluate and all outputs can build. +If the outputs are available in a binary cache then both properties are confirmed to be true. +Notably, downloading the output from the binary cache is not strictly necessary for the validation. + +Previously, `nix flake check` would download a flake output if the full build is available in a binary cache. + +Some users will find this change significantly reduces costly bandwidth and CI workflow time. + +PR: [DeterminateSystems/nix-src#134](https://github.com/DeterminateSystems/nix-src/pull/134) + +### Improved flake locking of transitive dependencies + +Determinate Nix now re-locks all transitive dependencies when changing a flake input's source URL. 
+ +This fixes an issue where in some scenarios Nix would not re-lock those inputs and incorrectly use the old inputs' dependencies. + +PR: [DeterminateSystems/nix-src#137](https://github.com/DeterminateSystems/nix-src/pull/137) + +**Full Changelog**: [v3.7.0...v3.8.0](https://github.com/DeterminateSystems/nix-src/compare/v3.7.0...v3.8.0) diff --git a/doc/manual/source/release-notes-determinate/rl-3.8.1.md b/doc/manual/source/release-notes-determinate/rl-3.8.1.md new file mode 100644 index 000000000000..90dc328f6ec2 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.8.1.md @@ -0,0 +1,9 @@ +# Release 3.8.1 (2025-07-11) + +* Based on [upstream Nix 2.30.0](../release-notes/rl-2.30.md). + +## What's Changed +* Address ifdef problem with macOS/BSD sandboxing by @gustavderdrache in [DeterminateSystems/nix-src#142](https://github.com/DeterminateSystems/nix-src/pull/142) + + +**Full Changelog**: [v3.8.0...v3.8.1](https://github.com/DeterminateSystems/nix-src/compare/v3.8.0...v3.8.1) diff --git a/doc/manual/source/release-notes-determinate/rl-3.8.2.md b/doc/manual/source/release-notes-determinate/rl-3.8.2.md new file mode 100644 index 000000000000..638d90f6841b --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.8.2.md @@ -0,0 +1,10 @@ +# Release 3.8.2 (2025-07-12) + +* Based on [upstream Nix 2.30.0](../release-notes/rl-2.30.md). 
+ +## What's Changed +* ci: don't run the full test suite for x86_64-darwin by @grahamc in [DeterminateSystems/nix-src#144](https://github.com/DeterminateSystems/nix-src/pull/144) +* Try publishing the manual again by @grahamc in [DeterminateSystems/nix-src#145](https://github.com/DeterminateSystems/nix-src/pull/145) + + +**Full Changelog**: [v3.8.1...v3.8.2](https://github.com/DeterminateSystems/nix-src/compare/v3.8.1...v3.8.2) diff --git a/doc/manual/source/release-notes-determinate/rl-3.8.3.md b/doc/manual/source/release-notes-determinate/rl-3.8.3.md new file mode 100644 index 000000000000..d3eb02bc7ea5 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.8.3.md @@ -0,0 +1,26 @@ +# Release 3.8.3 (2025-07-18) + +* Based on [upstream Nix 2.30.1](../release-notes/rl-2.30.md). + +## What's Changed + +### Non-blocking evaluation caching + +Users reported evaluation would occasionally block other evaluation processes. + +The evaluation cache database is now opened in write-ahead mode to prevent delaying evaluations. + +PR: [DeterminateSystems/nix-src#150](https://github.com/DeterminateSystems/nix-src/pull/150) + +### New experimental feature: `external-builders` + +This experimental feature allows Nix to call an external program for the build environment. + +The interface and behavior of this feature may change at any moment without a correspondingly major semver version change. 
+
+PRs:
+- [DeterminateSystems/nix-src#141](https://github.com/DeterminateSystems/nix-src/pull/141)
+- [DeterminateSystems/nix-src#152](https://github.com/DeterminateSystems/nix-src/pull/152)
+- [DeterminateSystems/nix-src#78](https://github.com/DeterminateSystems/nix-src/pull/78)
+
+**Full Changelog**: [v3.8.2...v3.8.3](https://github.com/DeterminateSystems/nix-src/compare/v3.8.2...v3.8.3)
diff --git a/doc/manual/source/release-notes-determinate/rl-3.8.4.md b/doc/manual/source/release-notes-determinate/rl-3.8.4.md
new file mode 100644
index 000000000000..7c73e75ca023
--- /dev/null
+++ b/doc/manual/source/release-notes-determinate/rl-3.8.4.md
@@ -0,0 +1,9 @@
+# Release 3.8.4 (2025-07-21)
+
+* Based on [upstream Nix 2.30.1](../release-notes/rl-2.30.md).
+
+## What's Changed
+* Revert "Use WAL mode for SQLite cache databases" by @grahamc in [DeterminateSystems/nix-src#155](https://github.com/DeterminateSystems/nix-src/pull/155)
+
+
+**Full Changelog**: [v3.8.3...v3.8.4](https://github.com/DeterminateSystems/nix-src/compare/v3.8.3...v3.8.4)
diff --git a/doc/manual/source/release-notes-determinate/rl-3.8.5.md b/doc/manual/source/release-notes-determinate/rl-3.8.5.md
new file mode 100644
index 000000000000..0f1bbe6f99d7
--- /dev/null
+++ b/doc/manual/source/release-notes-determinate/rl-3.8.5.md
@@ -0,0 +1,58 @@
+## What's Changed
+
+### Less time "unpacking into the Git cache"
+
+Unpacking sources into the user's cache now takes 1/2 to 1/4 of the time it used to.
+Previously, Nix serially unpacked sources into the cache.
+This change takes better advantage of our users' hardware by parallelizing the import.
+Real life testing shows an initial Nixpkgs import takes 3.6s on Linux, when it used to take 11.7s.
+
+PR: [DeterminateSystems/nix-src#149](https://github.com/DeterminateSystems/nix-src/pull/149)
+
+### Copy paths to the daemon in parallel
+
+Determinate Nix's evaluator no longer blocks evaluation when copying paths to the store. 
+Previously, Nix would pause evaluation when it needed to add files to the store.
+Now, the copying is performed in the background allowing evaluation to proceed.
+
+PR: [DeterminateSystems/nix-src#162](https://github.com/DeterminateSystems/nix-src/pull/162)
+
+### Faster Nix evaluation by reducing duplicate Nix daemon queries
+
+Determinate Nix more effectively caches store path validity data within a single evaluation.
+Previously, the Nix client would perform many thousands of extra Nix daemon requests.
+Each extra request takes real time, and this change reduced a sample evaluation by over 12,000 requests.
+
+PR: [DeterminateSystems/nix-src#157](https://github.com/DeterminateSystems/nix-src/pull/157)
+
+### More responsive tab completion
+
+Tab completion now implies the "--offline" flag, which disables most network requests.
+Previously, tab completing Nix arguments would attempt to fetch sources and access binary caches.
+Operating in offline mode improves the interactive experience of Nix when tab completing.
+
+PR: [DeterminateSystems/nix-src#161](https://github.com/DeterminateSystems/nix-src/pull/161)
+
+### ZFS users: we fixed the mysterious stall.
+
+Opening the Nix database is usually instantaneous but sometimes has a several second latency.
+Determinate Nix works around this issue, eliminating the frustrating random stall when running Nix commands.
+
+PR: [DeterminateSystems/nix-src#158](https://github.com/DeterminateSystems/nix-src/pull/158)
+
+### Other changes
+
+* Determinate Nix is now fully formatted by clang-format, making it easier than ever to contribute to the project.
+
+PR: [DeterminateSystems/nix-src#159](https://github.com/DeterminateSystems/nix-src/pull/159)
+
+* Determinate Nix is now based on upstream Nix 2.30.2.
+
+PR: [DeterminateSystems/nix-src#160](https://github.com/DeterminateSystems/nix-src/pull/160)
+
+* Determinate Nix now uses `main` as our development branch, moving away from `detsys-main`. 
+ +PRs: +* [DeterminateSystems/nix-src#164](https://github.com/DeterminateSystems/nix-src/pull/164) +* [DeterminateSystems/nix-src#166](https://github.com/DeterminateSystems/nix-src/pull/166) + diff --git a/doc/manual/source/release-notes-determinate/v3.10.0.md b/doc/manual/source/release-notes-determinate/v3.10.0.md new file mode 100644 index 000000000000..c644dd787446 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.10.0.md @@ -0,0 +1,10 @@ +# Release 3.10.0 (2025-09-02) + +* Based on [upstream Nix 2.31.0](../release-notes/rl-2.31.md). + +## What's Changed + +This release rebases Determinate Nix on upstream Nix 2.31.0. + + +**Full Changelog**: [v3.9.1...v3.10.0](https://github.com/DeterminateSystems/nix-src/compare/v3.9.1...v3.10.0) diff --git a/doc/manual/source/release-notes-determinate/v3.10.1.md b/doc/manual/source/release-notes-determinate/v3.10.1.md new file mode 100644 index 000000000000..08cbe4fd0583 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.10.1.md @@ -0,0 +1,9 @@ +# Release 3.10.1 (2025-09-02) + +* Based on [upstream Nix 2.31.1](../release-notes/rl-2.31.md). + +## What's Changed +This release rebases Determinate Nix on upstream Nix 2.31.1. + + +**Full Changelog**: [v3.10.0...v3.10.1](https://github.com/DeterminateSystems/nix-src/compare/v3.10.0...v3.10.1) diff --git a/doc/manual/source/release-notes-determinate/v3.11.0.md b/doc/manual/source/release-notes-determinate/v3.11.0.md new file mode 100644 index 000000000000..7abb665a5a9f --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.11.0.md @@ -0,0 +1,36 @@ +# Release 3.11.0 (2025-09-03) + +- Based on [upstream Nix 2.31.1](../release-notes/rl-2.31.md). 
+ +## What's Changed + +### Parallel evaluation + +The following commands are now able to evaluate Nix expressions in parallel: + +- `nix search` +- `nix flake check` +- `nix flake show` +- `nix eval --json` + +This is currently in developer preview, and we'll be turning it on for more users in the coming weeks. +If you would like to try it right away, specify `eval-cores` in your `/etc/nix/nix.custom.conf`: + +```ini +eval-cores = 0 # Evaluate across all cores +``` + +Further, we introduced a new builtin: `builtins.parallel`. +This new builtin allows users to explicitly parallelize evaluation within a Nix expression. + +Using this new builtin requires turning on an additional experimental feature: + +```ini +extra-experimental-features = parallel-eval +``` + +Please note that this new builtin is subject to change semantics or even go away during the developer preview. + +PR: [DeterminateSystems/nix-src#125](https://github.com/DeterminateSystems/nix-src/pull/125) + +**Full Changelog**: [v3.10.1...v3.11.0](https://github.com/DeterminateSystems/nix-src/compare/v3.10.1...v3.11.0) diff --git a/doc/manual/source/release-notes-determinate/v3.11.1.md b/doc/manual/source/release-notes-determinate/v3.11.1.md new file mode 100644 index 000000000000..305971643330 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.11.1.md @@ -0,0 +1,9 @@ +# Release 3.11.1 (2025-09-04) + +* Based on [upstream Nix 2.31.1](../release-notes/rl-2.31.md). 
+
+## What's Changed
+* Fix race condition in Value::isTrivial() by @edolstra in [DeterminateSystems/nix-src#192](https://github.com/DeterminateSystems/nix-src/pull/192)
+
+
+**Full Changelog**: [v3.11.0...v3.11.1](https://github.com/DeterminateSystems/nix-src/compare/v3.11.0...v3.11.1)
diff --git a/doc/manual/source/release-notes-determinate/v3.11.2.md b/doc/manual/source/release-notes-determinate/v3.11.2.md
new file mode 100644
index 000000000000..ac4fe569dffe
--- /dev/null
+++ b/doc/manual/source/release-notes-determinate/v3.11.2.md
@@ -0,0 +1,24 @@
+# Release 3.11.2 (2025-09-12)
+
+* Based on [upstream Nix 2.31.1](../release-notes/rl-2.31.md).
+
+## What's Changed
+
+### Fix some interactions with the registry and flakes that include a `?dir=` parameter
+
+Some users were experiencing issues when their flake registry contained a flake that included a `?dir=` parameter, causing commands like `nix eval registry-with-flake-in-subdir#output` and those that used `--inputs-from` to fail or behave incorrectly.
+
+This is now fixed, so use your flakes inside subdirs without fear!
+
+PRs: [DeterminateSystems/nix-src#196](https://github.com/DeterminateSystems/nix-src/pull/196), [DeterminateSystems/nix-src#199](https://github.com/DeterminateSystems/nix-src/pull/199)
+
+### Only substitute inputs if they haven't already been fetched
+
+When using `lazy-trees`, you might have noticed Nix fetching some source inputs from a cache, even though you could have sworn it already fetched those inputs!
+
+This fixes that behavior such that Nix will try to fetch inputs from their original location, and only if that fails fall back to fetching from a substituter. 
+ +PR: [DeterminateSystems/nix-src#202](https://github.com/DeterminateSystems/nix-src/pull/202) + + +**Full Changelog**: [v3.11.1...v3.11.2](https://github.com/DeterminateSystems/nix-src/compare/v3.11.1...v3.11.2) diff --git a/doc/manual/source/release-notes-determinate/v3.11.3.md b/doc/manual/source/release-notes-determinate/v3.11.3.md new file mode 100644 index 000000000000..fab5ed51a4b5 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.11.3.md @@ -0,0 +1,34 @@ +# Release 3.11.3 (2025-10-09) + +* Based on [upstream Nix 2.31.2](../release-notes/rl-2.31.md). + +## What's Changed + +### Fix some bugs and interactions with parallel eval + +We received some reports of parallel eval having issues, such as not being able to be interrupted, infinite recursion hanging forever, and segfaults when using the experimental `builtins.parallel`. + +Those have now been fixed. + +Additionally, the debugger now disables parallel eval, because the two features are incompatible. + +PRs: [DeterminateSystems/nix-src#206](https://github.com/DeterminateSystems/nix-src/pull/206), [DeterminateSystems/nix-src#213](https://github.com/DeterminateSystems/nix-src/pull/213), [DeterminateSystems/nix-src#218](https://github.com/DeterminateSystems/nix-src/pull/218), [DeterminateSystems/nix-src#205](https://github.com/DeterminateSystems/nix-src/pull/205) + +### `NIX_SSHOPTS` + `ssh-ng://root@localhost` fix + +We noticed that specifying `NIX_SSHOPTS=-p2222` when using a command that uses SSH (such as `nix copy --to ssh-ng://root@localhost`) stopped respecting the `NIX_SSHOPTS` setting because of an incorrect comparison. + +This has been fixed, so `NIX_SSHOPTS` and SSH stores that are accessed like `user@localhost` work again. 
+ +PR: [DeterminateSystems/nix-src#219](https://github.com/DeterminateSystems/nix-src/pull/219) + +### Fix `error: [json.exception.type_error.302] type must be string, but is array` when using `exportReferencesGraph` + +We received a report of a `nix build` failing on a specific flake due to its expression using `exportReferencesGraph` with a heterogeneous array of dependencies, causing this inscrutable error. + +This specific case has been broken since Nix 2.29.0, and is now fixed. + +PRs: [DeterminateSystems/nix-src#221](https://github.com/DeterminateSystems/nix-src/pull/221), [DeterminateSystems/nix-src#225](https://github.com/DeterminateSystems/nix-src/pull/225) + + +**Full Changelog**: [v3.11.2...v3.11.3](https://github.com/DeterminateSystems/nix-src/compare/v3.11.2...v3.11.3) diff --git a/doc/manual/source/release-notes-determinate/v3.12.0.md b/doc/manual/source/release-notes-determinate/v3.12.0.md new file mode 100644 index 000000000000..55c1f10bf15f --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.12.0.md @@ -0,0 +1,17 @@ +# Release 3.12.0 (2025-10-23) + +* Based on [upstream Nix 2.32.1](../release-notes/rl-2.32.md). + +## What's Changed + +### `nix nario` + +Determinate Nix has a new command, `nix nario`, that replaces the commands `nix-store --export` and `nix-store --import` from the old CLI. `nix nario` allows you to serialize store paths to a file that can be imported into another Nix store. It is backwards compatible with the file format generated by `nix-store --export`. It also provides a new format (selected by passing `--format 2`) that supports store path attributes such as signatures, and allows store paths to be imported more efficiently. + +### Other changes + +`nix flake clone` now supports arbitrary input types. In particular, this allows you to clone tarball flakes, such as flakes on FlakeHub. + +When using `-vv`, Determinate Nix now prints the Nix version. 
This is useful when diagnosing Nix problems from the debug output of a Nix run. + +**Full Changelog**: [v3.11.3...v3.12.0](https://github.com/DeterminateSystems/nix-src/compare/v3.11.3...v3.12.0) diff --git a/doc/manual/source/release-notes-determinate/v3.12.1.md b/doc/manual/source/release-notes-determinate/v3.12.1.md new file mode 100644 index 000000000000..1be2b48e26d8 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.12.1.md @@ -0,0 +1,10 @@ +# Release 3.12.1 (2025-11-04) + +* Based on [upstream Nix 2.32.1](../release-notes/rl-2.32.md). + +## What's Changed +* Allow access to the result of fetchClosure by @edolstra in [DeterminateSystems/nix-src#241](https://github.com/DeterminateSystems/nix-src/pull/241) +* libstore/build: fixup JSON logger missing the resBuildResult result event by @cole-h in [DeterminateSystems/nix-src#246](https://github.com/DeterminateSystems/nix-src/pull/246) + + +**Full Changelog**: [v3.12.0...v3.12.1](https://github.com/DeterminateSystems/nix-src/compare/v3.12.0...v3.12.1) diff --git a/doc/manual/source/release-notes-determinate/v3.12.2.md b/doc/manual/source/release-notes-determinate/v3.12.2.md new file mode 100644 index 000000000000..4c8c3169aa72 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.12.2.md @@ -0,0 +1,42 @@ +# Release 3.12.2 (2025-11-05) + +* Based on [upstream Nix 2.32.2](../release-notes/rl-2.32.md). + +## What's Changed + +### Faster `revCount` computation + +When using Git repositories with a long history, calculating the `revCount` attribute can take a long time. Determinate Nix now computes `revCount` using multiple threads, making it much faster. + +Note that if you don't need `revCount`, you can disable it altogether by setting the flake input attribute `shallow = true`. 
+ +PR: [DeterminateSystems/nix-src#245](https://github.com/DeterminateSystems/nix-src/pull/245) + +### More readable error messages + +Previously, Nix showed full flakerefs in error messages such as stack traces, e.g. +``` + … from call site + at «github:NixOS/nixpkgs/3bea86e918d8b54aa49780505d2d4cd9261413be?narHash=sha256-Ica%2B%2BSXFuLyxX9Q7YxhfZulUif6/gwM8AEQYlUxqSgE%3D»/lib/customisation.nix:69:16: + 68| let + 69| result = f origArgs; + | ^ + 70| +``` +It now abbreviates these by leaving out `narHash` and shortening Git revisions: +``` + … from call site + at «github:NixOS/nixpkgs/3bea86e»/lib/customisation.nix:69:16: + 68| let + 69| result = f origArgs; + | ^ + 70| +``` + +PR: [DeterminateSystems/nix-src#243](https://github.com/DeterminateSystems/nix-src/pull/243) + +### Other changes + +This release fixes an assertion failure in `nix flake check`. PR: [DeterminateSystems/nix-src#252](https://github.com/DeterminateSystems/nix-src/pull/252) + +**Full Changelog**: [v3.12.1...v3.12.2](https://github.com/DeterminateSystems/nix-src/compare/v3.12.1...v3.12.2) diff --git a/doc/manual/source/release-notes-determinate/v3.13.0.md b/doc/manual/source/release-notes-determinate/v3.13.0.md new file mode 100644 index 000000000000..09041c2acda0 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.13.0.md @@ -0,0 +1,45 @@ +# Release 3.13.0 (2025-11-09) + +* Based on [upstream Nix 2.32.3](../release-notes/rl-2.32.md). + +## What's Changed + + +### Git sources have a progress indicator again + +Nix used to feel "stuck" while it was cloning large repositories. +Determinate Nix now shows git's native progress indicator while fetching. 
+
+PR: [DeterminateSystems/nix-src#250](https://github.com/DeterminateSystems/nix-src/pull/250)
+
+### C API improvements
+
+We've invested in the C API to support our work on closure analysis for SBOM generation, and made a couple of changes:
+
+* C API: add nix_locked_flake_read_path for flake file reading
+* C API: make nix_store_get_fs_closure compatible with upstream
+
+PRs:
+* [DeterminateSystems/nix-src#244](https://github.com/DeterminateSystems/nix-src/pull/244)
+* [DeterminateSystems/nix-src#254](https://github.com/DeterminateSystems/nix-src/pull/254)
+
+### Dropping support for Intel Macs
+
+Determinate Nix no longer supports being installed on Intel Macs.
+Determinate Nix will continue to support building for Intel macOS targets, but only from an Apple Silicon host.
+
+From our intent-to-ship:
+> Over the past year, we’ve watched usage of Determinate on Intel macOS hosts dwindle to a minuscule fraction of total usage.
+> It currently stands at approximately 0.02% of all installations.
+> The vast majority are run in managed CI environments that, we anticipate, will be able to easily convert to using Apple Silicon runners.
+
+For more information: https://github.com/DeterminateSystems/nix-src/issues/224
+
+PR: [DeterminateSystems/nix-src#257](https://github.com/DeterminateSystems/nix-src/pull/257)
+
+### Bugs fixed
+
+* IPv6 Store URLs now handle zone ID references like they did in previous releases [NixOS/nix#14434](https://github.com/NixOS/nix/pull/14434)
+
+
+**Full Changelog**: [v3.12.2...v3.13.0](https://github.com/DeterminateSystems/nix-src/compare/v3.12.2...v3.13.0)
diff --git a/doc/manual/source/release-notes-determinate/v3.13.1.md b/doc/manual/source/release-notes-determinate/v3.13.1.md
new file mode 100644
index 000000000000..025a192c44ee
--- /dev/null
+++ b/doc/manual/source/release-notes-determinate/v3.13.1.md
@@ -0,0 +1,10 @@
+# Release 3.13.1 (2025-11-12)
+
+* Based on [upstream Nix 2.32.4](../release-notes/rl-2.32.md). 
+ +## What's Changed +* nix bundle: Wait for async path writer by @edolstra in [DeterminateSystems/nix-src#260](https://github.com/DeterminateSystems/nix-src/pull/260) +* Sync with upstream 2.32.4 by @edolstra in [DeterminateSystems/nix-src#261](https://github.com/DeterminateSystems/nix-src/pull/261) + + +**Full Changelog**: [v3.13.0...v3.13.1](https://github.com/DeterminateSystems/nix-src/compare/v3.13.0...v3.13.1) diff --git a/doc/manual/source/release-notes-determinate/v3.13.2.md b/doc/manual/source/release-notes-determinate/v3.13.2.md new file mode 100644 index 000000000000..2490b865e6bc --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.13.2.md @@ -0,0 +1,68 @@ +# Release 3.13.2 (2025-11-19) + +* Based on [upstream Nix 2.32.4](../release-notes/rl-2.32.md). + +## What's Changed + +### Abbreviate flakerefs in lockfile diffs and `nix flake metadata` + +Flake refs are now abbreviated when possible, to reduce visual clutter. + +For example, this changes + +``` +• Updated input 'blender-bin': + 'https://api.flakehub.com/f/pinned/edolstra/blender-bin/1.0.19/01993ca7-2aa8-746f-96f5-ca8d2c2b962d/source.tar.gz?narHash=sha256-ZqVhVl9UYVErF8HW8lcvqss005VWYjuX//rZ%2BOmXyHg%3D' (2025-09-12) + → 'https://api.flakehub.com/f/pinned/edolstra/blender-bin/1.0.20/019a8772-b044-7738-8c03-109bdc9f0a01/source.tar.gz?narHash=sha256-sVj9Gmx0kwTDQPJ5kgQYszE3Hdjevu0zx0b/bL2fyUc%3D' (2025-11-15) +• Updated input 'nix': + 'github:DeterminateSystems/nix-src/236ebef6514f3a2a9765c8a1d80dd503b8e672be?narHash=sha256-s6/Err0yqOp5fM3OdCF1vhmEYpeElbPOWX88YrW2qj4%3D' (2025-10-23) + → 'github:DeterminateSystems/nix-src/ef054dc06e9701597bce0b0572af18cb4c7e7277?narHash=sha256-uqYmH0KA8caQqX5u4BMarZsuDlC%2B71HRsH3h4f3DPCA%3D' (2025-11-12) +``` + +to + +``` +• Updated input 'blender-bin': + 'https://api.flakehub.com/f/pinned/edolstra/blender-bin/1.0.19/01993ca7-2aa8-746f-96f5-ca8d2c2b962d/source.tar.gz' (2025-09-12) + → 
'https://api.flakehub.com/f/pinned/edolstra/blender-bin/1.0.20/019a8772-b044-7738-8c03-109bdc9f0a01/source.tar.gz' (2025-11-15) +• Updated input 'nix': + 'github:DeterminateSystems/nix-src/236ebef' (2025-10-23) + → 'github:DeterminateSystems/nix-src/ef054dc' (2025-11-12) +``` + +PR: [DeterminateSystems/nix-src#264](https://github.com/DeterminateSystems/nix-src/pull/264) + +### `nix flake prefetch-inputs` now skips build-time inputs + +Build-time inputs can already be fetched in parallel, so prefetching them is usually not what you want. + +This can be especially noticeable in projects that make extensive use of build-time flake inputs. + +PR: [DeterminateSystems/nix-src#263](https://github.com/DeterminateSystems/nix-src/pull/263) + +### Don't compute `revCount`/`lastModified` if they're already specified + +We don't care if the user (or more likely the lock file) specifies an incorrect value for these attributes, since it doesn't matter for security (unlike content hashes like `narHash`). + +This can save time when operating on large repos -- having to recalculate these attributes could slow things down greatly. + +PR: [DeterminateSystems/nix-src#269](https://github.com/DeterminateSystems/nix-src/pull/269) + +### Avoid unnecessary Git refetches + +This fixes the issue where updating a Git input does a non-shallow fetch, and then a subsequent eval does a shallow refetch because the `revCount` is already known. + +Now the subsequent eval will reuse the repo used in the first fetch. + +PR: [DeterminateSystems/nix-src#270](https://github.com/DeterminateSystems/nix-src/pull/270) + +### Use our mirrored flake registry + +The flake registry is security-critical and thus should have high availability. + +By mirroring the upstream Nix flake registry, we can make it less likely that a GitHub outage affects being able to resolve from the registry. 
+ +PR: [DeterminateSystems/nix-src#271](https://github.com/DeterminateSystems/nix-src/pull/271) + + +**Full Changelog**: [v3.13.1...v3.13.2](https://github.com/DeterminateSystems/nix-src/compare/v3.13.1...v3.13.2) diff --git a/doc/manual/source/release-notes-determinate/v3.14.0.md b/doc/manual/source/release-notes-determinate/v3.14.0.md new file mode 100644 index 000000000000..d72d5d21468c --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.14.0.md @@ -0,0 +1,159 @@ +# Release 3.14.0 (2025-12-08) + +* Based on [upstream Nix 2.32.4](../release-notes/rl-2.32.md). + +## What is going on?! `nix ps` to the rescue + +Determinate Nix now features a `nix ps` command to summarize all of the active builds and child processes: + +``` +$ nix ps +USER PID CPU DERIVATION/COMMAND +_nixbld1 30167 0.4s /nix/store/h431bcfml83czhpyzljhp9mw4yrq95vs-determinate-nix-manual-3.14.0.drv (wall=9s) +_nixbld1 30167 0.2s └───bash -e /nix/store/jwqf79v5p51x9mv8vx20fv9mzm2x7kig-source-stdenv.sh /nix/store/285whzixr5k1kfj6nidyj29mqqgv7n0b-default-builder.s +_nixbld1 30278 0.0s └───ninja -j14 +_nixbld1 30279 0.0s ├───/nix/store/p7rag2cw99d7alp6749rjqp71qc0mnzl-python3-3.12.11/bin/python3.12 /nix/store/8k5fancbc5fjmxq6izn0z4inwnmpj09y-mes +_nixbld1 30286 0.0s │ └───/nix/store/z59zm01pjwzil2qkvv0s4ibk54risy9a-determinate-nix-3.14.0/bin/nix config show --json +_nixbld1 30280 0.0s ├───/nix/store/p7rag2cw99d7alp6749rjqp71qc0mnzl-python3-3.12.11/bin/python3.12 /nix/store/8k5fancbc5fjmxq6izn0z4inwnmpj09y-mes +_nixbld1 30287 0.0s │ └───/nix/store/z59zm01pjwzil2qkvv0s4ibk54risy9a-determinate-nix-3.14.0/bin/nix __dump-language +_nixbld1 30281 0.0s ├───/nix/store/p7rag2cw99d7alp6749rjqp71qc0mnzl-python3-3.12.11/bin/python3.12 /nix/store/8k5fancbc5fjmxq6izn0z4inwnmpj09y-mes +_nixbld1 30288 0.0s │ └───/nix/store/z59zm01pjwzil2qkvv0s4ibk54risy9a-determinate-nix-3.14.0/bin/nix __dump-cli +_nixbld1 30282 0.0s ├───/nix/store/p7rag2cw99d7alp6749rjqp71qc0mnzl-python3-3.12.11/bin/python3.12 
/nix/store/8k5fancbc5fjmxq6izn0z4inwnmpj09y-mes +_nixbld1 30284 0.0s │ └───/nix/store/z59zm01pjwzil2qkvv0s4ibk54risy9a-determinate-nix-3.14.0/bin/nix __dump-xp-features +_nixbld1 30283 0.0s └───/nix/store/p7rag2cw99d7alp6749rjqp71qc0mnzl-python3-3.12.11/bin/python3.12 /nix/store/8k5fancbc5fjmxq6izn0z4inwnmpj09y-mes +_nixbld1 30285 0.0s └───/nix/store/bs1pvy8margy5sj0jwahchxbjnqzi14i-bash-5.2p37/bin/bash -euo pipefail -c if type -p build-release-notes > /de +_nixbld1 30289 0.0s └───changelog-d ../source/release-notes/../../rl-next +``` + +For the integrators out there, it also has a `--json` flag with all the raw data. + +PRs: +* [DeterminateSystems/nix-src#282](https://github.com/DeterminateSystems/nix-src/pull/282) +* [DeterminateSystems/nix-src#287](https://github.com/DeterminateSystems/nix-src/pull/287) + + +## Nix `build`, `profile`, and `flake check` commands tell you what output failed + +These commands now tell you exactly what flake outputs failed to build. +Previously, the error would indicate only what derivation failed to build -- but not which output. + +Now, `nix build` and `nix profile` commands provide the specific output: + +``` +$ nix build .#oneFakeHash .#badSystem --keep-going +❌ git+file:///Users/grahamc/src/github.com/DeterminateSystems/samples#oneFakeHash +error: hash mismatch in fixed-output derivation '/nix/store/58pp1y74j4f5zxfq50xncv2wvnxf7w3y-one-fake-hash.drv': + specified: sha256-AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= + got: sha256-i7j83d71sibS/ssSjLJ5PMKmbhjAM+BHW0aElvkgEwY= +❌ git+file:///Users/grahamc/src/github.com/DeterminateSystems/samples#badSystem +error: Cannot build '/nix/store/5vsaxi730yl2icngkyvn8wiflik5wfmq-bad-system.drv'. 
+ Reason: required system or feature not available + Required system: 'bogus' with features {} + Current system: 'aarch64-darwin' with features {apple-virt, benchmark, big-parallel, nixos-test} +``` + +And in a great change for CI, `nix flake check` users get improved summaries too: + +``` +$ nix flake check +❓ checks.aarch64-darwin.twoFakeHashes (cancelled) +❓ checks.aarch64-darwin.badSystemNested (cancelled) +❓ checks.aarch64-darwin.oneFakeHash (cancelled) +❓ checks.aarch64-darwin.failure (cancelled) +❓ checks.aarch64-darwin.badSystem (cancelled) +❓ checks.aarch64-darwin.weirdHash (cancelled) +❓ checks.aarch64-darwin.all (cancelled) +❓ checks.aarch64-darwin.fakeHashes (cancelled) +❓ checks.aarch64-darwin.incorrectHashes (cancelled) +❓ checks.aarch64-darwin.badFeaturesNested (cancelled) +❓ checks.aarch64-darwin.failureNested (cancelled) +❌ checks.aarch64-darwin.badFeatures +error: Cannot build '/nix/store/sc1cyhrpsm9yjx55cl2zzyr5lypwigi6-bad-feature.drv'. + Reason: required system or feature not available + Required system: 'aarch64-darwin' with features {bogus} + Current system: 'aarch64-darwin' with features {apple-virt, benchmark, big-parallel, nixos-test} +``` + +PRs: +* [DeterminateSystems/nix-src#281](https://github.com/DeterminateSystems/nix-src/pull/281) +* [DeterminateSystems/nix-src#285](https://github.com/DeterminateSystems/nix-src/pull/285) + + +## More seamless upgrades from Nix 2.18 and Nix 2.19 + +We've heard from some users who are trying to upgrade from Nix 2.18. + +These users are primarily experiencing problems caused by Nix 2.20 switching from `git-archive` to `libgit2` for fetching repositories. +This change caused some `git-archive` filters to stop executing, like autocrlf. +Not running those filters is an improvement, and running those filters *can cause* instability in source hashes. +However, this switch *did* cause previously valid hashes to become invalid. 
+ +Determinate Nix now retries fetching an old archive with `git-archive` as a fallback when libgit2 fails to provide the correct source. + +Further, to support a progressive migration Determinate Nix has a new option: `nix-219-compat`. +Set `nix-219-compat=true` to cause Nix to author new flake.nix files with a `git-archive` based source hash. + +Finally, a user identified `builtins.path` changed since 2.18 and stopped propagating references. +We have corrected this regression. + +PRs: +* [DeterminateSystems/nix-src#283](https://github.com/DeterminateSystems/nix-src/pull/283) +* [DeterminateSystems/nix-src#278](https://github.com/DeterminateSystems/nix-src/pull/278) + +## Flake registry mirroring + +Determinate Nix now includes a fallback copy of the Nix Registry. +This change builds on top of v3.13.2, where we changed from the upstream Nix registry to a mirrored copy hosted by `install.determinate.systems`. + +Combined, these changes increase the reliability of Nix in the face of network outages. + +> [!NOTE] +> Flake registry URLs for `flake.nix` inputs is deprecated. +> The flake registry should only be used for interactive use. +> See: https://github.com/DeterminateSystems/nix-src/issues/37 + +PR: [DeterminateSystems/nix-src#273](https://github.com/DeterminateSystems/nix-src/pull/273) + +## Flake registry resolution CLI + +We added the new command `nix registry resolve` to help debug issues with Flake registries. +This command looks up a flake registry input name and returns the flakeref it resolves to. + +For example, looking up Nixpkgs: + +``` +$ nix registry resolve nixpkgs +github:NixOS/nixpkgs/nixpkgs-unstable +``` + +Or looking up the 25.11 branch of Nixpkgs: +``` +$ nix registry resolve nixpkgs/release-25.11 +github:NixOS/nixpkgs/release-25.11 +``` + +> [!NOTE] +> Flake registry URLs for `flake.nix` inputs is deprecated. +> The flake registry should only be used for interactive use. 
+> See: https://github.com/DeterminateSystems/nix-src/issues/37 + +PR: [DeterminateSystems/nix-src#273](https://github.com/DeterminateSystems/nix-src/pull/273) + +## Improved Docker image packaging + +Thanks to `employee-64c7dcd530593118dcccc3fb`, the OCI / Docker images built by the Determinate Nix flake.nix can be further customized. + +Users can specify their own base image by specifying `fromImage`. + +Additionally, users can specify additional directories to include at the beginning or end of the PATH variable with `extraPrePaths` and `extraPostPaths`. + +PRs: +* [DeterminateSystems/nix-src#277](https://github.com/DeterminateSystems/nix-src/pull/277) +* [DeterminateSystems/nix-src#280](https://github.com/DeterminateSystems/nix-src/pull/280) + +## Bug fixes + +* Corrected an error with parallel evaluation ([DeterminateSystems/nix-src#286](https://github.com/DeterminateSystems/nix-src/pull/286)) +* Fixed compatibility with updated Nixpkgs versions. Thank you SandaruKasa! ([DeterminateSystems/nix-src#284](https://github.com/DeterminateSystems/nix-src/pull/284)) + +**Full Changelog**: [v3.13.2...v3.14.0](https://github.com/DeterminateSystems/nix-src/compare/v3.13.2...v3.14.0) diff --git a/doc/manual/source/release-notes-determinate/v3.15.0.md b/doc/manual/source/release-notes-determinate/v3.15.0.md new file mode 100644 index 000000000000..fb568374c3f2 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.15.0.md @@ -0,0 +1,28 @@ +# Release 3.15.0 (2025-12-19) + +* Based on [upstream Nix 2.33.0](../release-notes/rl-2.33.md). + +## `fetchTree` improvement + +`builtins.fetchTree` now implicitly treats the fetched tree as "final" when a `narHash` is supplied, meaning that it will not return attributes like `lastModified` or `revCount` unless they were specified by the caller. This makes it possible to substitute the tree from a binary cache, which is often more efficient.
Furthermore, for Git inputs, it allows Nix to perform a shallow fetch, which is much faster. + +This is primarily useful for users of `flake-compat`, since it uses `builtins.fetchTree` internally. + +PR: [DeterminateSystems/nix-src#297](https://github.com/DeterminateSystems/nix-src/pull/297) + +## New builtin function `builtins.filterAttrs` + +Nixpkgs heavily relies on this function to select attributes from an attribute set: + +```nix +filterAttrs = pred: set: removeAttrs set (filter (name: !pred name set.${name}) (attrNames set)); +``` + +Determinate Nix now has this function built-in, which makes it much faster. + +PR: [DeterminateSystems/nix-src#291](https://github.com/DeterminateSystems/nix-src/pull/291) + +## New Contributors +* @not-ronjinger made their first contribution in [DeterminateSystems/nix-src#291](https://github.com/DeterminateSystems/nix-src/pull/291) + +**Full Changelog**: [v3.14.0...v3.15.0](https://github.com/DeterminateSystems/nix-src/compare/v3.14.0...v3.15.0) diff --git a/doc/manual/source/release-notes-determinate/v3.15.1.md b/doc/manual/source/release-notes-determinate/v3.15.1.md new file mode 100644 index 000000000000..9243962cf4b5 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.15.1.md @@ -0,0 +1,15 @@ +# Release 3.15.1 (2025-12-24) + +* Based on [upstream Nix 2.33.0](../release-notes/rl-2.33.md). + +## What's Changed +Users reported the v3.15.0 tarball could not be fetched in a fixed-output derivation due to current stdenv paths present in the documentation. This release eliminated those paths. + +PR: [DeterminateSystems/nix-src#306](https://github.com/DeterminateSystems/nix-src/pull/306) + +Additionally, this change re-enables CodeRabbit's code review on our changes. CodeRabbit was disabled by the upstream project, and we inadvertently included that change.
+ +PR: [DeterminateSystems/nix-src#305](https://github.com/DeterminateSystems/nix-src/pull/305) + + +**Full Changelog**: [v3.15.0...v3.15.1](https://github.com/DeterminateSystems/nix-src/compare/v3.15.0...v3.15.1) diff --git a/doc/manual/source/release-notes-determinate/v3.15.2.md b/doc/manual/source/release-notes-determinate/v3.15.2.md new file mode 100644 index 000000000000..c5e5339990b5 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.15.2.md @@ -0,0 +1,44 @@ +# Release 3.15.2 (2026-01-20) + +* Based on [upstream Nix 2.33.1](../release-notes/rl-2.33.md). + +## What's Changed + +### Improved performance for users with a lot of dependencies + +If you ever had the occasion to query your binary cache for over 110,000 store paths simultaneously you might have found it rather slow. +Previously, Nix would enqueue all the downloads at once. +This appears to trigger quadratic behavior in curl. + +Determinate Nix now enqueues a reasonable number of substitutions at once. +At the same time, we fixed a performance issue in the progress bar with so many dependencies. + +PR: [DeterminateSystems/nix-src#315](https://github.com/DeterminateSystems/nix-src/pull/315) + +### Lazy trees update: path inputs are now lazy + +Previously, inputs like `path:///path/to/a/dependency` were eagerly fetched when lazy-trees is enabled. +In Determinate Nix 3.15.2, path input types are also fetched lazily. +This change saves time and improves performance for users with path inputs. + +PRs: +* [DeterminateSystems/nix-src#312](https://github.com/DeterminateSystems/nix-src/pull/312) +* [DeterminateSystems/nix-src#317](https://github.com/DeterminateSystems/nix-src/pull/317) + +### `nix repl` now reports the Determinate version + +A small change, but now `nix repl` correctly reports the Determinate Nix version: + +``` +$ nix repl +Nix (Determinate Nix 3.15.1) 2.33.0 +Type :? for help.
+nix-repl> +``` + +PR: [DeterminateSystems/nix-src#316](https://github.com/DeterminateSystems/nix-src/pull/316) + +## New Contributors +* @dliberalesso made their first contribution in [DeterminateSystems/nix-src#313](https://github.com/DeterminateSystems/nix-src/pull/313) + +**Full Changelog**: [v3.15.1...v3.15.2](https://github.com/DeterminateSystems/nix-src/compare/v3.15.1...v3.15.2) diff --git a/doc/manual/source/release-notes-determinate/v3.16.0.md b/doc/manual/source/release-notes-determinate/v3.16.0.md new file mode 100644 index 000000000000..8e80ac68402a --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.16.0.md @@ -0,0 +1,53 @@ +# Release 3.16.0 (2026-02-12) + +* Based on [upstream Nix 2.33.3](../release-notes/rl-2.33.md). + +## Support `.gitattributes` in subdirectories + +For performance, the Git backwards compatibility hack was only applied to repositories that had a `.gitattributes` in the root directory. +However, it is possible to have a `.gitattributes` file in a subdirectory, and there are real-world repos that do this, so we have dropped that restriction. + +PR: [DeterminateSystems/nix-src#335](https://github.com/DeterminateSystems/nix-src/pull/335) + +## Fix hung downloads when `http-connections = 0` + +When we started limiting the number of active cURL handles in [DeterminateSystems/nix-src#315](https://github.com/DeterminateSystems/nix-src/pull/315), we did not take into account that `http-connections = 0` is a special value that means, roughly "as many connections as possible" (the exact behavior is up to cURL). + +This should now be fixed. + +PR: [DeterminateSystems/nix-src#327](https://github.com/DeterminateSystems/nix-src/pull/327) + +## `builtins.getFlake` now supports relative paths + +`builtins.getFlake` now supports using relative paths, like: + +```nix +builtins.getFlake ./.. 
+``` + +instead of the hacky + +```nix +builtins.getFlake (builtins.flakeRefToString { type = "path"; path = self.sourceInfo.outPath; narHash = self.narHash; }); +``` + +Note that allowing `builtins.getFlake` to fetch from store paths is probably a bad idea, since it's ambiguous when using chroot stores, so a warning will be printed when this is encountered. + +PRs: +* [DeterminateSystems/nix-src#337](https://github.com/DeterminateSystems/nix-src/pull/337) +* [DeterminateSystems/nix-src#338](https://github.com/DeterminateSystems/nix-src/pull/338) + +## Fixed a bug with too many open files + +Recently, some users have reported seeing errors like: + +``` +error: creating git packfile indexer: failed to create temporary file '/Users/anon/.cache/nix/tarball-cache-v2/objects/pack/pack_git2_56d617039ac17c2b': Too many open files +``` + +This should now be fixed. + +PR: [DeterminateSystems/nix-src#347](https://github.com/DeterminateSystems/nix-src/pull/347) + + +**Full Changelog**: [v3.15.2...v3.16.0](https://github.com/DeterminateSystems/nix-src/compare/v3.15.2...v3.16.0) diff --git a/doc/manual/source/release-notes-determinate/v3.16.1.md b/doc/manual/source/release-notes-determinate/v3.16.1.md new file mode 100644 index 000000000000..6ecd5262b7c9 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.16.1.md @@ -0,0 +1,24 @@ +# Release 3.16.1 (2026-02-22) + +* Based on [upstream Nix 2.33.3](../release-notes/rl-2.33.md). + +## What's Changed + +### `nix store info` now correctly supports `--refresh` and `--offline` + +Previously, Nix had a hard-coded TTL of seven days. +Determinate Nix moved that TTL to a new setting `narinfo-cache-meta-ttl` and now `nix store info` respects the `--refresh` and `--offline` flags. + +This change makes it possible to freshly validate authenticating to a remote store.
+ +PR: [DeterminateSystems/nix-src#355](https://github.com/DeterminateSystems/nix-src/pull/355) + +### Corrected `builtins.hashString` behavior under lazy trees + +`builtins.hashString` now devirtualizes lazy paths, making the hash result stable. + +PR: [DeterminateSystems/nix-src#360](https://github.com/DeterminateSystems/nix-src/pull/360) + + + +**Full Changelog**: [v3.16.0...v3.16.1](https://github.com/DeterminateSystems/nix-src/compare/v3.16.0...v3.16.1) diff --git a/doc/manual/source/release-notes-determinate/v3.16.2.md b/doc/manual/source/release-notes-determinate/v3.16.2.md new file mode 100644 index 000000000000..73a1b25f21c8 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.16.2.md @@ -0,0 +1,8 @@ +# Release 3.16.2 (2026-02-23) + +* Based on [upstream Nix 2.33.3](../release-notes/rl-2.33.md). + +## What's Changed +This release is exclusively improvements to `determinate-nixd`. + + diff --git a/doc/manual/source/release-notes-determinate/v3.16.3.md b/doc/manual/source/release-notes-determinate/v3.16.3.md new file mode 100644 index 000000000000..fcc6fefa33c7 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.16.3.md @@ -0,0 +1,6 @@ +# Release 3.16.3 (2026-02-24) + +* Based on [upstream Nix 2.33.3](../release-notes/rl-2.33.md). + +## What's Changed +This release only includes changes in determinate-nixd. diff --git a/doc/manual/source/release-notes-determinate/v3.17.0.md b/doc/manual/source/release-notes-determinate/v3.17.0.md new file mode 100644 index 000000000000..e09938786e55 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.17.0.md @@ -0,0 +1,10 @@ +# Release 3.17.0 (2026-03-04) + +* Based on [upstream Nix 2.33.3](../release-notes/rl-2.33.md). + +## What's Changed +Determinate Nix 3.17.0 brings exciting improvements like Flake Schemas, provenance, and Wasm / WASI. +We'll be posting more details over the next week or so on our blog: https://determinate.systems/blog/. 
+ + +**Full Changelog**: [v3.16.3...v3.17.0](https://github.com/DeterminateSystems/nix-src/compare/v3.16.3...v3.17.0) diff --git a/doc/manual/source/release-notes-determinate/v3.17.1.md b/doc/manual/source/release-notes-determinate/v3.17.1.md new file mode 100644 index 000000000000..19da0209931e --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.17.1.md @@ -0,0 +1,49 @@ +# Release 3.17.1 (2026-03-18) + +* Based on [upstream Nix 2.33.3](../release-notes/rl-2.33.md). + +## What's Changed + +### Provenance now supports "build-time" provenance tags + +Users can now specify key/value tags to attach to a build's provenance. +For example, the `nix-installer-action` now attaches the following properties to each build: + +- github_workflow_ref +- github_workflow_sha +- github_sha +- github_run_attempt +- github_run_id +- github_run_number +- github_job +- github_ref +- github_repository +- github_server_url + +PR: [DeterminateSystems/nix-src#374](https://github.com/DeterminateSystems/nix-src/pull/374) + +### Flake inputs are substituted when possible + +Locked flake inputs will be fetched from a binary cache when possible, instead of preferring the authoritative flake source. +This is intended to reduce load on code forges, and also improves the user experience on large flake inputs. + +PR: [DeterminateSystems/nix-src#380](https://github.com/DeterminateSystems/nix-src/pull/380) + +### `nix profile upgrade` and `nix profile remove` now support tab completion + +PR: [DeterminateSystems/nix-src#382](https://github.com/DeterminateSystems/nix-src/pull/382) + +### Flake schemas can now define an output as "legacy" + +"Legacy" flakes are intended for legacyPackages on Nixpkgs. +The "legacy" mark is intended to reduce evaluation time due to the extreme size of legacyPackages. +Note: the name "legacy" is not intended as a value judgement, and at this point we're sort of stuck with the name. + +### Bug fixes + +* Fix crash in `nix repl` loading an invalid WASM file twice.
[DeterminateSystems/nix-src#378](https://github.com/DeterminateSystems/nix-src/pull/378) +* Don't crash if SIGINT happens while printing an exception. [DeterminateSystems/nix-src#384](https://github.com/DeterminateSystems/nix-src/pull/384) +* `nix-env -i`: Wait for the async path writer. [DeterminateSystems/nix-src#385](https://github.com/DeterminateSystems/nix-src/pull/385) + + +**Full Changelog**: [v3.17.0...v3.17.1](https://github.com/DeterminateSystems/nix-src/compare/v3.17.0...v3.17.1) diff --git a/doc/manual/source/release-notes-determinate/v3.17.2.md b/doc/manual/source/release-notes-determinate/v3.17.2.md new file mode 100644 index 000000000000..ce45a8df7ec7 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.17.2.md @@ -0,0 +1,16 @@ +# Release 3.17.2 (2026-03-27) + +* Based on [upstream Nix 2.33.3](../release-notes/rl-2.33.md). + +## What's Changed + +### Bug fixes + +* Cache `getLegacyGitAccessor()`. [DeterminateSystems/nix-src#396](https://github.com/DeterminateSystems/nix-src/pull/396) +* Don't destroy `windowSize` mutex. [DeterminateSystems/nix-src#397](https://github.com/DeterminateSystems/nix-src/pull/397) +* When doing concurrent substitutions of the same path, download only once. [DeterminateSystems/nix-src#398](https://github.com/DeterminateSystems/nix-src/pull/398) +* `builtins.getFlake`: Handle `path:

` where *p* has a discarded string context. [DeterminateSystems/nix-src#402](https://github.com/DeterminateSystems/nix-src/pull/402) +* Ensure `_interruptCallbacks` is alive while `signalHandlerThread` is running. [DeterminateSystems/nix-src#403](https://github.com/DeterminateSystems/nix-src/pull/403) +* Fix assertion failure in `nix::BuiltPath::toRealisedPaths()`. [DeterminateSystems/nix-src#401](https://github.com/DeterminateSystems/nix-src/pull/401) + +**Full Changelog**: [v3.17.1...v3.17.2](https://github.com/DeterminateSystems/nix-src/compare/v3.17.1...v3.17.2) diff --git a/doc/manual/source/release-notes-determinate/v3.17.3.md b/doc/manual/source/release-notes-determinate/v3.17.3.md new file mode 100644 index 000000000000..24463e48485f --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.17.3.md @@ -0,0 +1,48 @@ +# Release 3.17.3 (2026-04-07) + +* Based on [upstream Nix 2.33.3](../release-notes/rl-2.33.md). + +## What's Changed + +### Fix for GHSA-g3g9-5vj6-r3gj: root privilege escalation via the Nix daemon + +This release contains a critical fix for a security vulnerability on Linux that allows any user that has access to the Nix daemon to obtain root privileges. +This vulnerability affects all versions of Determinate Nix prior to 3.17.3, and all versions of upstream Nix prior to 2.34.5, 2.33.4, 2.32.7, 2.31.4, 2.30.4, 2.29.3, and 2.28.6. +All Linux users are advised to upgrade immediately. + +For more details, see the upstream advisory [GHSA-g3g9-5vj6-r3gj](https://github.com/NixOS/nix/security/advisories/GHSA-g3g9-5vj6-r3gj). +Many thanks to edef for reporting this issue and to Sergei Zimmerman for implementing the fix. + +### Avoiding duplicate source tree downloads + +When multiple Nix processes (like `nix-eval-jobs` instances) fetch the same source tree at the same time, it was previously possible for each process to perform the download independently, resulting in wasteful multiple downloads of the same source tree. 
Nix now uses a per-source tree lock to ensure that only one process performs the download. + +PR: [DeterminateSystems/nix-src#410](https://github.com/DeterminateSystems/nix-src/pull/410) + +### WAT support in `builtins.wasm` + +`builtins.wasm` now supports WebAssembly Text Format (WAT) in addition to binary Wasm modules. +This is primarily useful for testing. + +PR: [DeterminateSystems/nix-src#405](https://github.com/DeterminateSystems/nix-src/pull/405) + +### Git shallow fetching + +Nix now removes Git's `shallow.lock` lock file before running `git fetch`. +This prevents fetches from hanging if Git was previously interrupted. + +PR: [DeterminateSystems/nix-src#414](https://github.com/DeterminateSystems/nix-src/pull/414) + +### Debugging improvements + +Certain C++ exceptions that should never happen (like `std::logic_error`) are now treated as aborts, providing stack traces and core dumps that are easier to debug. + +PR: [DeterminateSystems/nix-src#407](https://github.com/DeterminateSystems/nix-src/pull/407) + +### Bug fixes + +* `nix develop` respects `legacyPackages` again. [DeterminateSystems/nix-src#413](https://github.com/DeterminateSystems/nix-src/pull/413) + +* The `lookupPathResolved` data structure has been made thread-safe. [DeterminateSystems/nix-src#415](https://github.com/DeterminateSystems/nix-src/pull/415) + +**Full Changelog**: [v3.17.2...v3.17.3](https://github.com/DeterminateSystems/nix-src/compare/v3.17.2...v3.17.3) diff --git a/doc/manual/source/release-notes-determinate/v3.18.0.md b/doc/manual/source/release-notes-determinate/v3.18.0.md new file mode 100644 index 000000000000..aaf22deec8c8 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.18.0.md @@ -0,0 +1,42 @@ +# Release 3.18.0 (2026-04-20) + +* Based on [upstream Nix 2.33.3](../release-notes/rl-2.33.md). + +## What's Changed + +### Sentry integration + +In order to more proactively keep track of crashes, Sentry is now integrated into Determinate Nix. 
+ +This allows us to more easily triage and remedy crashes that occur in the wild, without depending on manual user reports. + +It can be enabled by: + +* populating the file `/etc/nix/sentry-endpoint` with a Sentry DSN; or +* setting the `NIX_SENTRY_ENDPOINT` environment variable to a Sentry DSN + +and can be disabled by: + +* setting the environment variable `DETSYS_IDS_TELEMETRY` to the value `disabled`; or +* setting the environment variable `NIX_SENTRY_ENDPOINT` to an empty string + +PR: [DeterminateSystems/nix-src#418](https://github.com/DeterminateSystems/nix-src/pull/418) + +### Pre-build hook now receives the JSON serialization of the derivation + +The pre-build hook already received the path of the derivation as an argument, but that path doesn't typically exist when called as a remote build. + +Now, the pre-build hook is spawned with the environment variable `NIX_DERIVATION_V4` set to a file that contains the JSON representation of the derivation in v4 format, allowing introspection of e.g. `requiredSystemFeatures` for scheduling decisions. + +PR: [DeterminateSystems/nix-src#424](https://github.com/DeterminateSystems/nix-src/pull/424) + +### Fix empty Git exports when using legacy Git compatibility + +A regression introduced in v3.16.0 made it possible for there to be empty Git exports in the Nix store when using legacy Git flakes (those depending on Nix 2.19 lockfile semantics). + +This is now fixed.
+ +PR: [DeterminateSystems/nix-src#425](https://github.com/DeterminateSystems/nix-src/pull/425) + + +**Full Changelog**: [v3.17.3...v3.18.0](https://github.com/DeterminateSystems/nix-src/compare/v3.17.3...v3.18.0) diff --git a/doc/manual/source/release-notes-determinate/v3.18.1.md b/doc/manual/source/release-notes-determinate/v3.18.1.md new file mode 100644 index 000000000000..26c9592561ad --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.18.1.md @@ -0,0 +1,23 @@ +# Release 3.18.1 (2026-04-23) + +* Based on [upstream Nix 2.33.4](../release-notes/rl-2.33.md). + +## What's Changed + +### Sentry integration improvements + +This release includes fixes for a couple of issues with the Sentry integration: + +* Ensures the chroot helper starts before the Sentry thread, allowing chroot stores to work again +* Resets Mach exception ports on macOS, so that exec'd programs do not communicate with Determinate Nix's `crashpad_handler` + +Additionally, Determinate Nix now includes the Nix command and subcommand to Sentry reports to make it easier to discern where an issue happened. +Note that this does _not_ include any command-line arguments other than the command (such as `nix-daemon` and `nix`) and subcommand (such as `flake show`).
+ +PRs: +* [DeterminateSystems/nix-src#433](https://github.com/DeterminateSystems/nix-src/pull/433) +* [DeterminateSystems/nix-src#432](https://github.com/DeterminateSystems/nix-src/pull/432) +* [DeterminateSystems/nix-src#436](https://github.com/DeterminateSystems/nix-src/pull/436) + + +**Full Changelog**: [v3.18.0...v3.18.1](https://github.com/DeterminateSystems/nix-src/compare/v3.18.0...v3.18.1) diff --git a/doc/manual/source/release-notes-determinate/v3.8.6.md b/doc/manual/source/release-notes-determinate/v3.8.6.md new file mode 100644 index 000000000000..8f917f2362ff --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.8.6.md @@ -0,0 +1,14 @@ +# Release 3.8.6 (2025-08-19) + +* Based on [upstream Nix 2.30.2](../release-notes/rl-2.30.md). + +## What's Changed +* Auto update release notes by @grahamc in [DeterminateSystems/nix-src#170](https://github.com/DeterminateSystems/nix-src/pull/170) +* Use WAL mode for SQLite cache databases (2nd attempt) by @edolstra in [DeterminateSystems/nix-src#167](https://github.com/DeterminateSystems/nix-src/pull/167) +* Enable parallel marking in boehm-gc by @edolstra in [DeterminateSystems/nix-src#168](https://github.com/DeterminateSystems/nix-src/pull/168) +* BasicClientConnection::queryPathInfo(): Don't throw exception for invalid paths by @edolstra in [DeterminateSystems/nix-src#172](https://github.com/DeterminateSystems/nix-src/pull/172) +* Fix queryPathInfo() negative caching by @edolstra in [DeterminateSystems/nix-src#173](https://github.com/DeterminateSystems/nix-src/pull/173) +* forceDerivation(): Wait for async path write after forcing value by @edolstra in [DeterminateSystems/nix-src#176](https://github.com/DeterminateSystems/nix-src/pull/176) + + +**Full Changelog**: [v3.8.5...v3.8.6](https://github.com/DeterminateSystems/nix-src/compare/v3.8.5...v3.8.6) diff --git a/doc/manual/source/release-notes-determinate/v3.9.0.md b/doc/manual/source/release-notes-determinate/v3.9.0.md new file mode 100644 index 
000000000000..66deb69b6192 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.9.0.md @@ -0,0 +1,45 @@ +# Release 3.9.0 (2025-08-26) + +* Based on [upstream Nix 2.30.2](../release-notes/rl-2.30.md). + +## What's Changed + +### Build-time flake inputs + +Some of our users have hundreds or thousands of flake inputs. +In those cases, it is painfully slow for Nix to fetch all the inputs during evaluation of the flake. + +Determinate Nix has an experimental feature for deferring the fetching to build time of the dependent derivations. + +This is currently in developer preview. +If you would like to try it, add the experimental feature to your `/etc/nix/nix.custom.conf`: + +```ini +extra-experimental-features = build-time-fetch-tree +``` + +Then, mark an input to be fetched at build time: + +```nix +inputs.example = { + type = "github"; + owner = "DeterminateSystems"; + repo = "example"; + flake = false; # <-- currently required + buildTime = true; +}; +``` + +Let us know what you think! + +PR: [DeterminateSystems/nix-src#49](https://github.com/DeterminateSystems/nix-src/pull/49) + +### Corrected inconsistent behavior of `nix flake check` + +Users reported that `nix flake check` would not consistently validate the entire flake. + +We've fixed this issue and improved our testing around `nix flake check`. + +PR: [DeterminateSystems/nix-src#182](https://github.com/DeterminateSystems/nix-src/pull/182) + +**Full Changelog**: [v3.8.6...v3.9.0](https://github.com/DeterminateSystems/nix-src/compare/v3.8.6...v3.9.0) diff --git a/doc/manual/source/release-notes-determinate/v3.9.1.md b/doc/manual/source/release-notes-determinate/v3.9.1.md new file mode 100644 index 000000000000..38d17199c2c0 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.9.1.md @@ -0,0 +1,20 @@ +# Release 3.9.1 (2025-08-28) + +- Based on [upstream Nix 2.30.2](../release-notes/rl-2.30.md). 
+ +### A useful `nix flake init` template default + +Nix's default flake template is [extremely bare bones](https://github.com/NixOS/templates/blob/ad0e221dda33c4b564fad976281130ce34a20cb9/trivial/flake.nix), and not a useful starting point. + +Determinate Nix now uses [a more fleshed out default template](https://github.com/DeterminateSystems/flake-templates/blob/8af99b99627da41f16897f60eb226db30c775e76/default/flake.nix), including targeting multiple systems. + +PR: [DeterminateSystems/nix-src#180](https://github.com/DeterminateSystems/nix-src/pull/180) + +### Build cancellation is repaired on macOS + +A recent macOS update changed how signals are handled by Nix and broke using Ctrl-C to stop a build. +Determinate Nix on macOS correctly handles these signals and stops the build. + +PR: [DeterminateSystems/nix-src#184](https://github.com/DeterminateSystems/nix-src/pull/184) + +**Full Changelog**: [v3.9.0...v3.9.1](https://github.com/DeterminateSystems/nix-src/compare/v3.9.0...v3.9.1) diff --git a/doc/manual/source/release-notes/rl-2.19.md b/doc/manual/source/release-notes/rl-2.19.md index 813279c9f05f..0596ef909619 100644 --- a/doc/manual/source/release-notes/rl-2.19.md +++ b/doc/manual/source/release-notes/rl-2.19.md @@ -69,7 +69,7 @@ This makes it match `nix derivation show`, which also maps store paths to information. -- When Nix is installed using the [binary installer](@docroot@/installation/installing-binary.md), in supported shells (Bash, Zsh, Fish) +- When Nix is installed using the binary installer, in supported shells (Bash, Zsh, Fish) [`XDG_DATA_DIRS`](https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html#variables) is now populated with the path to the `/share` subdirectory of the current profile.
This means that command completion scripts, `.desktop` files, and similar artifacts installed via [`nix-env`](@docroot@/command-ref/nix-env.md) or [`nix profile`](@docroot@/command-ref/new-cli/nix3-profile.md) (experimental) can be found by any program that follows the [XDG Base Directory Specification](https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html). diff --git a/doc/manual/source/release-notes/rl-2.24.md b/doc/manual/source/release-notes/rl-2.24.md index e9b46bb22b70..f608fb54f7d9 100644 --- a/doc/manual/source/release-notes/rl-2.24.md +++ b/doc/manual/source/release-notes/rl-2.24.md @@ -268,6 +268,21 @@ be configured using the `warn-large-path-threshold` setting, e.g. `--warn-large-path-threshold 100M`. +- Wrap filesystem exceptions more correctly [#11378](https://github.com/NixOS/nix/pull/11378) + + With the switch to `std::filesystem` in different places, Nix started to throw `std::filesystem::filesystem_error` in many places instead of its own exceptions. + + This led to no longer generating error traces, for example when listing a non-existing directory. + + This version catches these types of exception correctly and wraps them into Nix's own exception type. + + Author: [**@Mic92**](https://github.com/Mic92) + +- `` uses TLS verification [#11585](https://github.com/NixOS/nix/pull/11585) + + Previously `` did not do TLS verification. This was because the Nix sandbox in the past did not have access to TLS certificates, and Nix checks the hash of the fetched file anyway. However, this can expose authentication data from `netrc` and URLs to man-in-the-middle attackers. In addition, Nix now in some cases (such as when using impure derivations) does *not* check the hash. Therefore we have now enabled TLS verification. This means that downloads by `` will now fail if you're fetching from a HTTPS server that does not have a valid certificate. + + `` is also known as the builtin derivation builder `builtin:fetchurl`. 
It's not to be confused with the evaluation-time function `builtins.fetchurl`, which was not affected by this issue. ## Contributors diff --git a/doc/manual/source/release-notes/rl-2.33.md b/doc/manual/source/release-notes/rl-2.33.md index bed697029389..810dcad00b15 100644 --- a/doc/manual/source/release-notes/rl-2.33.md +++ b/doc/manual/source/release-notes/rl-2.33.md @@ -279,3 +279,35 @@ This release was made possible by the following 33 contributors: - Henry [**(@cootshk)**](https://github.com/cootshk) - Martin Joerg [**(@mjoerg)**](https://github.com/mjoerg) - Farid Zakaria [**(@fzakaria)**](https://github.com/fzakaria) +# Release 2.33.3 (2026-02-13) + +- S3 binary caches now use virtual-hosted-style addressing by default [#15208](https://github.com/NixOS/nix/issues/15208) + + S3 binary caches now use virtual-hosted-style URLs + (`https://bucket.s3.region.amazonaws.com/key`) instead of path-style URLs + (`https://s3.region.amazonaws.com/bucket/key`) when connecting to standard AWS + S3 endpoints. This enables HTTP/2 multiplexing and fixes TCP connection + exhaustion (TIME_WAIT socket accumulation) under high-concurrency workloads. + + A new `addressing-style` store option controls this behavior: + + - `auto` (default): virtual-hosted-style for standard AWS endpoints, path-style + for custom endpoints. + - `path`: forces path-style addressing (deprecated by AWS). + - `virtual`: forces virtual-hosted-style addressing (bucket names must not + contain dots). + + Bucket names containing dots (e.g., `my.bucket.name`) automatically fall back + to path-style addressing in `auto` mode, because dotted names create + multi-level subdomains that break TLS wildcard certificate validation. 
+ + Example using path-style for backwards compatibility: + + ``` + s3://my-bucket/key?region=us-east-1&addressing-style=path + ``` + + Additionally, TCP keep-alive is now enabled on all HTTP connections, preventing + idle connections from being silently dropped by intermediate network devices + (NATs, firewalls, load balancers). + diff --git a/docker.nix b/docker.nix index d85927a08020..2bd6751762fa 100644 --- a/docker.nix +++ b/docker.nix @@ -8,6 +8,7 @@ # Image configuration name ? "nix", tag ? "latest", + fromImage ? null, bundleNixpkgs ? true, channelName ? "nixpkgs", channelURL ? "https://channels.nixos.org/nixpkgs-unstable", @@ -27,6 +28,8 @@ "org.opencontainers.image.description" = "Nix container image"; }, Cmd ? [ (lib.getExe bashInteractive) ], + extraPrePaths ? [ ], + extraPostPaths ? [ ], # Default Packages nix ? pkgs.nix, bashInteractive ? pkgs.bashInteractive, @@ -336,7 +339,7 @@ let globalFlakeRegistryPath="$nixCacheDir/flake-registry.json" ln -s ${flake-registry-path} $out$globalFlakeRegistryPath mkdir -p $out/nix/var/nix/gcroots/auto - rootName=$(${lib.getExe' nix "nix"} --extra-experimental-features nix-command hash file --type sha1 --base32 <(echo -n $globalFlakeRegistryPath)) + rootName=$(${lib.getExe' nix "nix"} hash file --type sha1 --base32 <(echo -n $globalFlakeRegistryPath)) ln -s $globalFlakeRegistryPath $out/nix/var/nix/gcroots/auto/$rootName '') ); @@ -352,6 +355,7 @@ dockerTools.buildLayeredImageWithNixDb { gid uname gname + fromImage ; contents = [ baseSystem ]; @@ -372,11 +376,15 @@ dockerTools.buildLayeredImageWithNixDb { Env = [ "USER=${uname}" "PATH=${ - lib.concatStringsSep ":" [ - "${userHome}/.nix-profile/bin" - "/nix/var/nix/profiles/default/bin" - "/nix/var/nix/profiles/default/sbin" - ] + lib.concatStringsSep ":" ( + extraPrePaths + ++ [ + "${userHome}/.nix-profile/bin" + "/nix/var/nix/profiles/default/bin" + "/nix/var/nix/profiles/default/sbin" + ] + ++ extraPostPaths + ) }" "MANPATH=${ lib.concatStringsSep ":" [ diff --git 
a/flake.lock b/flake.lock index 4c0bf91927a4..d44ac1734fb9 100644 --- a/flake.lock +++ b/flake.lock @@ -3,15 +3,15 @@ "flake-compat": { "flake": false, "locked": { - "lastModified": 1767039857, - "narHash": "sha256-vNpUSpF5Nuw8xvDLj2KCwwksIbjua2LZCqhV1LNRDns=", - "owner": "NixOS", + "lastModified": 1696426674, + "narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=", + "owner": "edolstra", "repo": "flake-compat", - "rev": "5edf11c44bc78a0d334f6334cdaf7d60d732daab", + "rev": "0f9255e01c2351cc7d116c072cb317785dd33b33", "type": "github" }, "original": { - "owner": "NixOS", + "owner": "edolstra", "repo": "flake-compat", "type": "github" } @@ -23,55 +23,51 @@ ] }, "locked": { - "lastModified": 1733312601, - "narHash": "sha256-4pDvzqnegAfRkPwO3wmwBhVi/Sye1mzps0zHWYnP88c=", - "owner": "hercules-ci", - "repo": "flake-parts", - "rev": "205b12d8b7cd4802fbcb8e8ef6a0f1408781a4f9", - "type": "github" + "lastModified": 1748821116, + "narHash": "sha256-F82+gS044J1APL0n4hH50GYdPRv/5JWm34oCJYmVKdE=", + "rev": "49f0870db23e8c1ca0b5259734a02cd9e1e371a1", + "revCount": 377, + "type": "tarball", + "url": "https://api.flakehub.com/f/pinned/hercules-ci/flake-parts/0.1.377%2Brev-49f0870db23e8c1ca0b5259734a02cd9e1e371a1/01972f28-554a-73f8-91f4-d488cc502f08/source.tar.gz" }, "original": { - "owner": "hercules-ci", - "repo": "flake-parts", - "type": "github" + "type": "tarball", + "url": "https://flakehub.com/f/hercules-ci/flake-parts/0.1" } }, "git-hooks-nix": { "inputs": { - "flake-compat": [], + "flake-compat": "flake-compat", "gitignore": [], "nixpkgs": [ "nixpkgs" - ], - "nixpkgs-stable": [ - "nixpkgs" ] }, "locked": { - "lastModified": 1734279981, - "narHash": "sha256-NdaCraHPp8iYMWzdXAt5Nv6sA3MUzlCiGiR586TCwo0=", - "owner": "cachix", - "repo": "git-hooks.nix", - "rev": "aa9f40c906904ebd83da78e7f328cd8aeaeae785", - "type": "github" + "lastModified": 1747372754, + "narHash": "sha256-2Y53NGIX2vxfie1rOW0Qb86vjRZ7ngizoo+bnXU9D9k=", + "rev": 
"80479b6ec16fefd9c1db3ea13aeb038c60530f46", + "revCount": 1026, + "type": "tarball", + "url": "https://api.flakehub.com/f/pinned/cachix/git-hooks.nix/0.1.1026%2Brev-80479b6ec16fefd9c1db3ea13aeb038c60530f46/0196d79a-1b35-7b8e-a021-c894fb62163d/source.tar.gz" }, "original": { - "owner": "cachix", - "repo": "git-hooks.nix", - "type": "github" + "type": "tarball", + "url": "https://flakehub.com/f/cachix/git-hooks.nix/0.1.941" } }, "nixpkgs": { "locked": { - "lastModified": 1771903837, - "narHash": "sha256-jEA8WggGKtMFeNeCKq3NK8cLEjJmG6/RLUElYYbBZ0E=", - "rev": "e764fc9a405871f1f6ca3d1394fb422e0a0c3951", + "lastModified": 1773222311, + "narHash": "sha256-BHoB/XpbqoZkVYZCfXJXfkR+GXFqwb/4zbWnOr2cRcU=", + "rev": "0590cd39f728e129122770c029970378a79d076a", + "revCount": 909248, "type": "tarball", - "url": "https://releases.nixos.org/nixos/25.11/nixos-25.11.6495.e764fc9a4058/nixexprs.tar.xz" + "url": "https://api.flakehub.com/f/pinned/NixOS/nixpkgs/0.2511.909248%2Brev-0590cd39f728e129122770c029970378a79d076a/019ce32b-8ace-7339-b129-cceaa8dd10c6/source.tar.gz" }, "original": { "type": "tarball", - "url": "https://channels.nixos.org/nixos-25.11/nixexprs.tar.xz" + "url": "https://flakehub.com/f/NixOS/nixpkgs/0.2511" } }, "nixpkgs-23-11": { @@ -108,7 +104,6 @@ }, "root": { "inputs": { - "flake-compat": "flake-compat", "flake-parts": "flake-parts", "git-hooks-nix": "git-hooks-nix", "nixpkgs": "nixpkgs", diff --git a/flake.nix b/flake.nix index 6bb0c609f6d7..55ceca187523 100644 --- a/flake.nix +++ b/flake.nix @@ -1,24 +1,18 @@ { description = "The purely functional package manager"; - inputs.nixpkgs.url = "https://channels.nixos.org/nixos-25.11/nixexprs.tar.xz"; + inputs.nixpkgs.url = "https://flakehub.com/f/NixOS/nixpkgs/0.2511"; inputs.nixpkgs-regression.url = "github:NixOS/nixpkgs/215d4d0fd80ca5163643b03a33fde804a29cc1e2"; inputs.nixpkgs-23-11.url = "github:NixOS/nixpkgs/a62e6edd6d5e1fa0329b8653c801147986f8d446"; - inputs.flake-compat = { - url = "github:NixOS/flake-compat"; - 
flake = false; - }; # dev tooling - inputs.flake-parts.url = "github:hercules-ci/flake-parts"; - inputs.git-hooks-nix.url = "github:cachix/git-hooks.nix"; + inputs.flake-parts.url = "https://flakehub.com/f/hercules-ci/flake-parts/0.1"; + inputs.git-hooks-nix.url = "https://flakehub.com/f/cachix/git-hooks.nix/0.1.941"; # work around https://github.com/NixOS/nix/issues/7730 inputs.flake-parts.inputs.nixpkgs-lib.follows = "nixpkgs"; inputs.git-hooks-nix.inputs.nixpkgs.follows = "nixpkgs"; - inputs.git-hooks-nix.inputs.nixpkgs-stable.follows = "nixpkgs"; # work around 7730 and https://github.com/NixOS/nix/issues/7807 - inputs.git-hooks-nix.inputs.flake-compat.follows = ""; inputs.git-hooks-nix.inputs.gitignore.follows = ""; outputs = @@ -34,26 +28,24 @@ officialRelease = true; - linux32BitSystems = [ "i686-linux" ]; + linux32BitSystems = [ ]; linux64BitSystems = [ "x86_64-linux" "aarch64-linux" ]; linuxSystems = linux32BitSystems ++ linux64BitSystems; darwinSystems = [ - "x86_64-darwin" "aarch64-darwin" ]; systems = linuxSystems ++ darwinSystems; crossSystems = [ - "armv6l-unknown-linux-gnueabihf" - "armv7l-unknown-linux-gnueabihf" - "riscv64-unknown-linux-gnu" + #"armv6l-unknown-linux-gnueabihf" + #"armv7l-unknown-linux-gnueabihf" + #"riscv64-unknown-linux-gnu" # Disabled because of https://github.com/NixOS/nixpkgs/issues/344423 # "x86_64-unknown-netbsd" - "x86_64-unknown-freebsd" - "x86_64-w64-mingw32" + #"x86_64-unknown-freebsd" ]; stdenvs = [ @@ -375,6 +367,40 @@ nix-manual-manpages-only = nixpkgsFor.${system}.native.nixComponents2.nix-manual-manpages-only; nix-internal-api-docs = nixpkgsFor.${system}.native.nixComponents2.nix-internal-api-docs; nix-external-api-docs = nixpkgsFor.${system}.native.nixComponents2.nix-external-api-docs; + + fallbackPathsNix = + let + pkgs = nixpkgsFor.${system}.native; + + closures = forAllSystems (system: self.packages.${system}.default.outPath); + + closures_json = + pkgs.runCommand "versions.json" + { + buildInputs = [ pkgs.jq ]; + 
passAsFile = [ "json" ]; + json = builtins.toJSON closures; + } + '' + cat "$jsonPath" | jq . > $out + ''; + + closures_nix = + pkgs.runCommand "versions.nix" + { + buildInputs = [ pkgs.jq ]; + passAsFile = [ "template" ]; + jsonPath = closures_json; + template = '' + builtins.fromJSON('''@closures@''') + ''; + } + '' + export closures=$(cat "$jsonPath"); + substituteAll "$templatePath" "$out" + ''; + in + closures_nix; } # We need to flatten recursive attribute sets of derivations to pass `flake check`. // @@ -440,7 +466,6 @@ # These attributes go right into `packages.`. "${pkgName}" = nixpkgsFor.${system}.native.nixComponents2.${pkgName}; "${pkgName}-static" = nixpkgsFor.${system}.native.pkgsStatic.nixComponents2.${pkgName}; - "${pkgName}-llvm" = nixpkgsFor.${system}.native.pkgsLLVM.nixComponents2.${pkgName}; } // flatMapAttrs (lib.genAttrs stdenvs (_: { })) ( stdenvName: @@ -520,32 +545,6 @@ } ) ) - // lib.optionalAttrs (!nixpkgsFor.${system}.native.stdenv.isDarwin) ( - prefixAttrs "static" ( - forAllStdenvs ( - stdenvName: - makeShell { - pkgs = nixpkgsFor.${system}.nativeForStdenv.${stdenvName}.pkgsStatic; - } - ) - ) - // prefixAttrs "llvm" ( - forAllStdenvs ( - stdenvName: - makeShell { - pkgs = nixpkgsFor.${system}.nativeForStdenv.${stdenvName}.pkgsLLVM; - } - ) - ) - // prefixAttrs "cross" ( - forAllCrossSystems ( - crossSystem: - makeShell { - pkgs = nixpkgsFor.${system}.cross.${crossSystem}; - } - ) - ) - ) // { native = self.devShells.${system}.native-stdenv; default = self.devShells.${system}.native; diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index f742a7440023..03d532f5c064 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -114,6 +114,7 @@ # Don't format vendored code ''^doc/manual/redirects\.js$'' ''^doc/manual/theme/highlight\.js$'' + ''^src/libfetchers/builtin-flake-registry\.json$'' ]; }; shellcheck = { diff --git a/maintainers/link-headers b/maintainers/link-headers new file mode 100755 
index 000000000000..2457a2dc8295 --- /dev/null +++ b/maintainers/link-headers @@ -0,0 +1,83 @@ +#!/usr/bin/env python3 + +# This script must be run from the root of the Nix repository. +# +# For include path hygiene, we need to put headers in a separate +# directory than sources. But during development, it is nice to have paths +# that are similar for headers and source files, e.g. +# `foo/bar/baz.{cc,hh}`, e.g. for less typing when opening one file, and +# then opening the other file. +# +# This script symlinks the headers next to the source files to +# facilitate such a development workflow. It also updates +# `.git/info/exclude` so that the symlinks are not accidentally committed +# by mistake. + +from pathlib import Path +import subprocess +import os + + +def main() -> None: + # Path to the source directory + GIT_TOPLEVEL = Path( + subprocess.run( + ["git", "rev-parse", "--show-toplevel"], + text=True, + stdout=subprocess.PIPE, + check=True, + ).stdout.strip() + ) + + # Get header files from git + result = subprocess.run( + ["git", "-C", str(GIT_TOPLEVEL), "ls-files", "*/include/nix/**.hh"], + text=True, + stdout=subprocess.PIPE, + check=True, + ) + header_files = result.stdout.strip().split("\n") + header_files.sort() + + links = [] + for file_str in header_files: + project_str, header_str = file_str.split("/include/nix/", 1) + project = Path(project_str) + header = Path(header_str) + + # Reconstruct the full path (relative to SRC_DIR) to the header file. + file = project / "include" / "nix" / header + + # The symlink should be created at "project/header", i.e. next to the project's sources. + link = project / header + + # Compute a relative path from the symlink's parent directory to the actual header file. + relative_source = os.path.relpath( + GIT_TOPLEVEL / file, GIT_TOPLEVEL / link.parent + ) + + # Create the symbolic link. 
+ full_link_path = GIT_TOPLEVEL / link + full_link_path.parent.mkdir(parents=True, exist_ok=True) + if full_link_path.is_symlink(): + full_link_path.unlink() + full_link_path.symlink_to(relative_source) + links.append(link) + + # Generate .gitignore file + gitignore_path = GIT_TOPLEVEL / ".git" / "info" / "exclude" + gitignore_path.parent.mkdir(parents=True, exist_ok=True) + with gitignore_path.open("w") as gitignore: + gitignore.write("# DO NOT EDIT! Autogenerated\n") + gitignore.write( + "# Symlinks for headers to be next to sources for development\n" + ) + gitignore.write('# Run "maintainers/link-headers" to regenerate\n') + gitignore.write("\n") + + for link in links: + gitignore.write(f"/{link}\n") + + +if __name__ == "__main__": + main() diff --git a/maintainers/upload-debug-info-to-sentry.py b/maintainers/upload-debug-info-to-sentry.py new file mode 100755 index 000000000000..92c3ec03029f --- /dev/null +++ b/maintainers/upload-debug-info-to-sentry.py @@ -0,0 +1,163 @@ +#!/usr/bin/env nix +#!nix shell --inputs-from . 
nixpkgs#sentry-cli nixpkgs#python3 nixpkgs#binutils --command python3 + +import argparse +import json +import os +import platform +import re +import subprocess +import sys +import urllib.error +import urllib.parse +import urllib.request + +NAR_DIR = "/tmp/nars" +DEBUG_INFO_DIR = "/tmp/debug-info" + + +def get_dynamic_libraries(executable: str) -> list[str]: + if platform.system() == "Darwin": + result = subprocess.run(["otool", "-L", executable], capture_output=True, text=True, check=True) + libs = [] + for line in result.stdout.splitlines()[1:]: # skip first line (the binary path itself) + # otool -L output lines look like: + # /nix/store/.../libfoo.dylib (compatibility version X.Y.Z, current version A.B.C) + m = re.match(r"\s+(\S+)\s+\(", line) + if m: + libs.append(m.group(1)) + return libs + else: + result = subprocess.run(["ldd", executable], capture_output=True, text=True, check=True) + libs = [] + for line in result.stdout.splitlines(): + # ldd output lines look like: + # libfoo.so.1 => /nix/store/.../libfoo.so.1 (0x...) + # /lib64/ld-linux-x86-64.so.2 (0x...) + m = re.search(r"=> (/\S+)", line) + if m: + libs.append(m.group(1)) + elif line.strip().startswith("/"): + path = line.strip().split()[0] + libs.append(path) + return libs + + +def get_build_id(path: str) -> str | None: + result = subprocess.run(["readelf", "-n", path], capture_output=True, text=True) + m = re.search(r"Build ID:\s+([0-9a-f]+)", result.stdout) + return m.group(1) if m else None + + +def download_nar(build_id: str, archive: str) -> str: + """Download a NAR to /tmp/nars and return the local path. 
Skips if already present.""" + base_url = f"https://cache.nixos.org/debuginfo/{build_id}" + nar_url = urllib.parse.urljoin(base_url, archive) + filename = nar_url.split("/")[-1] + local_path = os.path.join(NAR_DIR, filename) + if not os.path.exists(local_path): + os.makedirs(NAR_DIR, exist_ok=True) + print(f" downloading {nar_url} ...", file=sys.stderr) + urllib.request.urlretrieve(nar_url, local_path) + else: + print(f" already have {local_path}", file=sys.stderr) + return local_path + + +def extract_debug_symbols(nar_path: str, member: str, build_id: str) -> str: + """Extract a member from a .nar.xz into /tmp/debug-info/.debug. Returns the output path.""" + out_path = os.path.join(DEBUG_INFO_DIR, f"{build_id}.debug") + if os.path.exists(out_path): + print(f" already extracted {out_path}", file=sys.stderr) + return out_path + os.makedirs(DEBUG_INFO_DIR, exist_ok=True) + print(f" extracting {member} -> {out_path} ...", file=sys.stderr) + xz = subprocess.Popen(["xz", "-d"], stdin=open(nar_path, "rb"), stdout=subprocess.PIPE) + nar_cat = subprocess.run( + ["nix", "nar", "cat", "/dev/stdin", member], + stdin=xz.stdout, + capture_output=True, + check=True, + ) + xz.wait() + with open(out_path, "wb") as f: + f.write(nar_cat.stdout) + return out_path + + +def find_debug_file_in_dirs(build_id: str, debug_dirs: list[str]) -> str | None: + """Look for a .debug file by build ID under

/lib/debug/.build-id/NN/NNN.debug.""" + subpath = os.path.join("lib", "debug", ".build-id", build_id[:2], build_id[2:] + ".debug") + for d in debug_dirs: + candidate = os.path.join(d, subpath) + if os.path.exists(candidate): + return candidate + return None + + +def fetch_debuginfo(build_id: str) -> dict | None: + url = f"https://cache.nixos.org/debuginfo/{build_id}" + try: + with urllib.request.urlopen(url) as resp: + return json.loads(resp.read()) + except urllib.error.HTTPError as e: + if e.code == 404: + return None + raise + + +def main(): + parser = argparse.ArgumentParser( + description="Upload debug symbols to Sentry." + ) + parser.add_argument("executable", help="Path to the executable (e.g. ./result/bin/nix)") + parser.add_argument("--project", help="Sentry project ID") + parser.add_argument("--debug-dir", action="append", default=[], metavar="DIR", + help="Directory to search for debug files (may be repeated, Linux only)") + args = parser.parse_args() + + libs = [args.executable] + get_dynamic_libraries(args.executable) + + if platform.system() == "Darwin": + # On macOS there are no separate debug info files; upload the binaries directly. 
+ print("Files to upload:", file=sys.stderr) + for lib in libs: + print(f" {lib}", file=sys.stderr) + files_to_upload = libs + else: + debug_files = [] + print("ELF files to process:", file=sys.stderr) + for lib in libs: + build_id = get_build_id(lib) + if build_id is None: + print(f" {lib} (no build ID, uploading binary)", file=sys.stderr) + debug_files.append(lib) + continue + + local = find_debug_file_in_dirs(build_id, args.debug_dir) + if local: + print(f" {lib} ({build_id}): found locally at {local}", file=sys.stderr) + debug_files.append(local) + continue + + debuginfo = fetch_debuginfo(build_id) + if debuginfo is None: + print(f" {lib} ({build_id}): no separate debug info, uploading binary", file=sys.stderr) + debug_files.append(lib) + continue + print(f" {lib} ({build_id}): member={debuginfo['member']}", file=sys.stderr) + nar_path = download_nar(build_id, debuginfo["archive"]) + debug_file = extract_debug_symbols(nar_path, debuginfo["member"], build_id) + debug_files.append(debug_file) + files_to_upload = debug_files + + if files_to_upload: + print(f"Uploading {len(files_to_upload)} file(s) to Sentry...", file=sys.stderr) + cmd = ["sentry-cli", "debug-files", "upload"] + if args.project: + cmd += ["--project", args.project] + subprocess.run(cmd + files_to_upload, check=True) + + +if __name__ == "__main__": + main() diff --git a/nix-meson-build-support/common/cxa-throw/interpose-cxa-throw.cc b/nix-meson-build-support/common/cxa-throw/interpose-cxa-throw.cc new file mode 100644 index 000000000000..7238800b1906 --- /dev/null +++ b/nix-meson-build-support/common/cxa-throw/interpose-cxa-throw.cc @@ -0,0 +1,21 @@ +#include +#include +#include + +#include "is-logic-error.hh" + +typedef void (*cxa_throw_type)(void *, std::type_info *, void (*)(void *)); + +extern "C" void __cxa_throw(void * exc, std::type_info * tinfo, void (*dest)(void *)) +{ + if (is_logic_error(tinfo)) + abort_on_exception(exc, tinfo); + + static auto * orig = (cxa_throw_type) dlsym(RTLD_NEXT, 
"__cxa_throw"); + if (!orig) + abort(); + + orig(exc, tinfo, dest); + + __builtin_unreachable(); +} diff --git a/nix-meson-build-support/common/cxa-throw/is-logic-error.hh b/nix-meson-build-support/common/cxa-throw/is-logic-error.hh new file mode 100644 index 000000000000..9e5b1d638e7a --- /dev/null +++ b/nix-meson-build-support/common/cxa-throw/is-logic-error.hh @@ -0,0 +1,40 @@ +#pragma once + +#include +#include +#include +#include +#include + +#ifndef CXA_THROW_ON_LOGIC_ERROR +# define CXA_THROW_ON_LOGIC_ERROR() abort() +#endif + +static bool is_logic_error(const std::type_info * tinfo) +{ + if (*tinfo == typeid(std::logic_error)) + return true; + + auto * si = dynamic_cast(tinfo); + if (si) + return is_logic_error(si->__base_type); + + return false; +} + +static void abort_on_exception(void * exc, const std::type_info * tinfo) +{ + if (!is_logic_error(tinfo)) + return; + + char buf[512]; + snprintf( + buf, + sizeof(buf), + "Aborting on unexpected exception of type '%s', error: %s\n", + tinfo->name(), + ((std::exception *) exc)->what()); + [[maybe_unused]] auto r = write(STDERR_FILENO, buf, strlen(buf)); + + CXA_THROW_ON_LOGIC_ERROR(); +} diff --git a/nix-meson-build-support/common/cxa-throw/meson.build b/nix-meson-build-support/common/cxa-throw/meson.build new file mode 100644 index 000000000000..a20438059b95 --- /dev/null +++ b/nix-meson-build-support/common/cxa-throw/meson.build @@ -0,0 +1,77 @@ +have_cxa_throw = false + +can_interpose_cxa_throw_test_code = ''' +#include +#include + +#define CXA_THROW_ON_LOGIC_ERROR() _exit(0) +#include "interpose-cxa-throw.cc" + +int main() +{ + const char * volatile p = nullptr; + std::string s(p); + return 1; +} +''' + +can_interpose_cxa_throw_result = cxx.run( + can_interpose_cxa_throw_test_code, + args : [ '-ldl' ], + include_directories : include_directories('.'), + name : 'can interpose __cxa_throw (catches libstdc++ throws)', +) +can_interpose_cxa_throw = can_interpose_cxa_throw_result.compiled() and 
can_interpose_cxa_throw_result.returncode() == 0 + +if can_interpose_cxa_throw + interpose_cxa_throw_lib = static_library( + 'interpose-cxa-throw', + 'interpose-cxa-throw.cc', + dependencies : cxx.find_library('dl', required : false), + ) + + cxa_throw_dep = declare_dependency( + link_whole : interpose_cxa_throw_lib, + ) + + have_cxa_throw = true +else + can_wrap_cxa_throw_test_code = ''' + #include + #include + + #define CXA_THROW_ON_LOGIC_ERROR() _exit(0) + #include "wrap-cxa-throw.cc" + + int main() + { + const char * volatile p = nullptr; + std::string s(p); + return 1; + } + ''' + + wrap_cxa_throw_args = [ '-Wl,--wrap=__cxa_throw' ] + + can_wrap_cxa_throw_result = cxx.run( + can_wrap_cxa_throw_test_code, + args : wrap_cxa_throw_args, + include_directories : include_directories('.'), + name : 'can wrap __cxa_throw (catches libstdc++ throws)', + ) + can_wrap_cxa_throw = can_wrap_cxa_throw_result.compiled() and can_wrap_cxa_throw_result.returncode() == 0 + + if can_wrap_cxa_throw + wrap_cxa_throw_lib = static_library( + 'wrap-cxa-throw', + 'wrap-cxa-throw.cc', + ) + + cxa_throw_dep = declare_dependency( + link_whole : wrap_cxa_throw_lib, + link_args : wrap_cxa_throw_args + [ '-Wl,-u,__wrap___cxa_throw' ], + ) + + have_cxa_throw = true + endif +endif diff --git a/nix-meson-build-support/common/cxa-throw/wrap-cxa-throw.cc b/nix-meson-build-support/common/cxa-throw/wrap-cxa-throw.cc new file mode 100644 index 000000000000..f9de15533de0 --- /dev/null +++ b/nix-meson-build-support/common/cxa-throw/wrap-cxa-throw.cc @@ -0,0 +1,16 @@ +#include +#include + +#include "is-logic-error.hh" + +extern "C" void __real___cxa_throw(void *, std::type_info *, void (*)(void *)); + +extern "C" void __wrap___cxa_throw(void * exc, std::type_info * tinfo, void (*dest)(void *)) +{ + if (is_logic_error(tinfo)) + abort_on_exception(exc, tinfo); + + __real___cxa_throw(exc, tinfo, dest); + + __builtin_unreachable(); +} diff --git a/nix-meson-build-support/common/meson.build 
b/nix-meson-build-support/common/meson.build index 20dc2e3c8cf8..bb6a9774c1ed 100644 --- a/nix-meson-build-support/common/meson.build +++ b/nix-meson-build-support/common/meson.build @@ -79,5 +79,15 @@ endif # Darwin ld doesn't like "X.Y.ZpreABCD+W" nix_soversion = meson.project_version().split('+')[0].split('pre')[0] +cxx = meson.get_compiler('cpp') + +# Clang does not support prelinking on static builds +if cxx.get_id() == 'clang' and get_option('default_library') == 'static' + prelink = false +else + prelink = true +endif + subdir('assert-fail') subdir('asan-options') +subdir('cxa-throw') diff --git a/nix-meson-build-support/default-system-cpu/meson.build b/nix-meson-build-support/default-system-cpu/meson.build index f63b07975b6e..3e872578efca 100644 --- a/nix-meson-build-support/default-system-cpu/meson.build +++ b/nix-meson-build-support/default-system-cpu/meson.build @@ -14,6 +14,6 @@ if (host_machine.cpu_family() in [ 'ppc64', 'ppc' ]) and host_machine.endian() = nix_system_cpu += 'le' elif host_machine.cpu_family() in [ 'mips64', 'mips' ] and host_machine.endian() == 'little' nix_system_cpu += 'el' -elif host_machine.cpu_family() == 'arm' +elif host_machine.cpu_family() in [ 'arm', 'arm64' ] nix_system_cpu = host_machine.cpu() endif diff --git a/packaging/components.nix b/packaging/components.nix index b74484b6e9a2..962f0ba05b2e 100644 --- a/packaging/components.nix +++ b/packaging/components.nix @@ -27,7 +27,7 @@ let pkg-config ; - baseVersion = lib.fileContents ../.version; + baseVersion = lib.fileContents ../.version-determinate; versionSuffix = lib.optionalString (!officialRelease) "pre"; @@ -51,15 +51,6 @@ let exts: userFn: stdenv.mkDerivation (lib.extends (lib.composeManyExtensions exts) userFn); setVersionLayer = finalAttrs: prevAttrs: { - preConfigure = - prevAttrs.preConfigure or "" - + - # Update the repo-global .version file. - # Symlink ./.version points there, but by default only workDir is writable. 
- '' - chmod u+w ./.version - echo ${finalAttrs.version} > ./.version - ''; }; localSourceLayer = diff --git a/packaging/dependencies.nix b/packaging/dependencies.nix index 5338f70e57b3..c11c4149fa78 100644 --- a/packaging/dependencies.nix +++ b/packaging/dependencies.nix @@ -19,6 +19,7 @@ scope: { boehmgc = (pkgs.boehmgc.override { enableLargeConfig = true; + inherit stdenv; }).overrideAttrs (attrs: { # Increase the initial mark stack size to avoid stack @@ -27,7 +28,49 @@ scope: { # small, run Nix with GC_PRINT_STATS=1 and look for messages # such as `Mark stack overflow`, `No room to copy back mark # stack`, and `Grew mark stack to ... frames`. - NIX_CFLAGS_COMPILE = "-DINITIAL_MARK_STACK_SIZE=1048576"; + NIX_CFLAGS_COMPILE = [ + "-DINITIAL_MARK_STACK_SIZE=1048576" + ] + # For some reason that is not clear, it is wanting to use libgcc_eh which is not available. + # Force this to be built with compiler-rt & libunwind over libgcc_eh works. + # Issue: https://github.com/NixOS/nixpkgs/issues/177129 + ++ + lib.optionals + ( + stdenv.cc.isClang + && stdenv.hostPlatform.isStatic + && stdenv.cc.libcxx != null + && stdenv.cc.libcxx.isLLVM + ) + [ + "-rtlib=compiler-rt" + "-unwindlib=libunwind" + ]; + + buildInputs = + (attrs.buildInputs or [ ]) + ++ lib.optional ( + stdenv.cc.isClang + && stdenv.hostPlatform.isStatic + && stdenv.cc.libcxx != null + && stdenv.cc.libcxx.isLLVM + ) pkgs.llvmPackages.libunwind; + }); + + lowdown = + if lib.versionAtLeast pkgs.lowdown.version "2.0.2" then + pkgs.lowdown + else + pkgs.lowdown.overrideAttrs (prevAttrs: rec { + version = "2.0.2"; + src = pkgs.fetchurl { + url = "https://kristaps.bsd.lv/lowdown/snapshots/lowdown-${version}.tar.gz"; + hash = "sha512-cfzhuF4EnGmLJf5EGSIbWqJItY3npbRSALm+GarZ7SMU7Hr1xw0gtBFMpOdi5PBar4TgtvbnG4oRPh+COINGlA=="; + }; + nativeBuildInputs = prevAttrs.nativeBuildInputs ++ [ pkgs.buildPackages.bmake ]; + postInstall = + lib.replaceStrings [ "lowdown.so.1" "lowdown.1.dylib" ] [ "lowdown.so.2" 
"lowdown.2.dylib" ] + (prevAttrs.postInstall or ""); }); curl = @@ -37,6 +80,9 @@ scope: { zstdSupport = true; brotliSupport = true; zlibSupport = true; + # libpsl uses a data file needed at runtime, not useful for nix. + pslSupport = !stdenv.hostPlatform.isStatic; + idnSupport = !stdenv.hostPlatform.isStatic; }).overrideAttrs { # TODO: Fix in nixpkgs. Static build with brotli is marked as broken, but it's not the case. @@ -44,9 +90,36 @@ scope: { meta.broken = false; }; - libblake3 = pkgs.libblake3.override { - useTBB = !(stdenv.hostPlatform.isWindows || stdenv.hostPlatform.isStatic); - }; + libblake3 = + (pkgs.libblake3.override { + inherit stdenv; + # Nixpkgs disables tbb on static + useTBB = !(stdenv.hostPlatform.isWindows || stdenv.hostPlatform.isStatic); + }) + # For some reason that is not clear, it is wanting to use libgcc_eh which is not available. + # Force this to be built with compiler-rt & libunwind over libgcc_eh works. + # Issue: https://github.com/NixOS/nixpkgs/issues/177129 + .overrideAttrs + ( + attrs: + lib.optionalAttrs + ( + stdenv.cc.isClang + && stdenv.hostPlatform.isStatic + && stdenv.cc.libcxx != null + && stdenv.cc.libcxx.isLLVM + ) + { + NIX_CFLAGS_COMPILE = [ + "-rtlib=compiler-rt" + "-unwindlib=libunwind" + ]; + + buildInputs = [ + pkgs.llvmPackages.libunwind + ]; + } + ); # TODO Hack until https://github.com/NixOS/nixpkgs/issues/45462 is fixed. boost = @@ -57,12 +130,21 @@ scope: { "--with-coroutine" "--with-iostreams" "--with-url" + "--with-thread" ]; enableIcu = false; + inherit stdenv; }).overrideAttrs (old: { # Need to remove `--with-*` to use `--with-libraries=...` buildPhase = lib.replaceStrings [ "--without-python" ] [ "" ] old.buildPhase; installPhase = lib.replaceStrings [ "--without-python" ] [ "" ] old.installPhase; }); + + wasmtime = pkgs.callPackage ./wasmtime.nix { }; + + sentry-native = (pkgs.callPackage ./sentry-native.nix { }).override { + # Avoid having two curls in our closure. 
+ inherit (scope) curl; + }; } diff --git a/packaging/dev-shell.nix b/packaging/dev-shell.nix index 72d517d876a8..479229b06d0f 100644 --- a/packaging/dev-shell.nix +++ b/packaging/dev-shell.nix @@ -217,7 +217,7 @@ pkgs.nixComponents2.nix-util.overrideAttrs ( }; # Remove the version suffix to avoid unnecessary attempts to substitute in nix develop - version = lib.fileContents ../.version; + version = lib.fileContents ../.version-determinate; name = finalAttrs.pname; installFlags = "sysconfdir=$(out)/etc"; @@ -291,7 +291,8 @@ pkgs.nixComponents2.nix-util.overrideAttrs ( map (transformFlag "perl") (ignoreCrossFile pkgs.nixComponents2.nix-perl-bindings.mesonFlags) ) ++ map (transformFlag "libexpr") (ignoreCrossFile pkgs.nixComponents2.nix-expr.mesonFlags) - ++ map (transformFlag "libcmd") (ignoreCrossFile pkgs.nixComponents2.nix-cmd.mesonFlags); + ++ map (transformFlag "libcmd") (ignoreCrossFile pkgs.nixComponents2.nix-cmd.mesonFlags) + ++ map (transformFlag "nix") (ignoreCrossFile pkgs.nixComponents2.nix-cli.mesonFlags); nativeBuildInputs = let diff --git a/packaging/everything.nix b/packaging/everything.nix index 4d8f94f4b9ab..1d07dd4da09a 100644 --- a/packaging/everything.nix +++ b/packaging/everything.nix @@ -46,6 +46,10 @@ testers, patchedSrc ? 
null, + + curl, + boehmgc, + sentry-native, }: let @@ -77,7 +81,7 @@ let }; devdoc = buildEnv { - name = "nix-${nix-cli.version}-devdoc"; + name = "determinate-nix-${nix-cli.version}-devdoc"; paths = [ nix-internal-api-docs nix-external-api-docs @@ -86,7 +90,7 @@ let in stdenv.mkDerivation (finalAttrs: { - pname = "nix"; + pname = "determinate-nix"; version = nix-cli.version; /** @@ -104,6 +108,7 @@ stdenv.mkDerivation (finalAttrs: { "dev" "doc" "man" + "debug" ]; /** @@ -155,9 +160,18 @@ stdenv.mkDerivation (finalAttrs: { installPhase = let devPaths = lib.mapAttrsToList (_k: lib.getDev) finalAttrs.finalPackage.libs; + debugPaths = lib.map (lib.getOutput "debug") ( + lib.attrValues finalAttrs.finalPackage.libs + ++ [ + nix-cli + curl + boehmgc + ] + ++ lib.optional (stdenv.hostPlatform.isLinux && !stdenv.hostPlatform.isStatic) sentry-native + ); in '' - mkdir -p $out $dev/nix-support + mkdir -p $out $dev/nix-support $debug/lib/debug # Custom files echo $libs >> $dev/nix-support/propagated-build-inputs @@ -170,6 +184,12 @@ stdenv.mkDerivation (finalAttrs: { lndir $lib $dev done + for d in ${lib.escapeShellArgs debugPaths}; do + if [[ -d $d/lib/debug ]]; then + lndir $d/lib/debug $debug/lib/debug + fi + done + # Forwarded outputs ln -sT ${nix-manual} $doc ln -sT ${nix-manual.man} $man diff --git a/packaging/hydra.nix b/packaging/hydra.nix index 9b9b33d6798b..87d31a694b89 100644 --- a/packaging/hydra.nix +++ b/packaging/hydra.nix @@ -126,93 +126,6 @@ rec { system: self.devShells.${system}.default.inputDerivation )) [ "i686-linux" ]; - buildStatic = forAllPackages ( - pkgName: - lib.genAttrs linux64BitSystems ( - system: nixpkgsFor.${system}.native.pkgsStatic.nixComponents2.${pkgName} - ) - ); - - buildCross = forAllPackages ( - pkgName: - # Hack to avoid non-evaling package - ( - if pkgName == "nix-functional-tests" then - lib.flip builtins.removeAttrs [ "x86_64-w64-mingw32" ] - else if pkgName == "nix-nswrapper" then - lib.filterAttrs ( - crossSystem: _do_not_touch: 
nixpkgsFor.x86_64-linux.cross.${crossSystem}.stdenv.hostPlatform.isLinux - ) - else - lib.id - ) - ( - forAllCrossSystems ( - crossSystem: - lib.genAttrs [ "x86_64-linux" ] ( - system: nixpkgsFor.${system}.cross.${crossSystem}.nixComponents2.${pkgName} - ) - ) - ) - ); - - # Builds with sanitizers already have GC disabled, so this buildNoGc can just - # point to buildWithSanitizers in order to reduce the load on hydra. - buildNoGc = buildWithSanitizers; - - buildWithSanitizers = - let - components = forAllSystems ( - system: - let - pkgs = nixpkgsFor.${system}.native; - in - pkgs.nixComponents2.overrideScope ( - self: super: { - # Boost coroutines fail with ASAN on darwin. - withASan = !pkgs.stdenv.buildPlatform.isDarwin; - withUBSan = true; - nix-expr = super.nix-expr.override { enableGC = false; }; - # Unclear how to make Perl bindings work with a dynamically linked ASAN. - nix-perl-bindings = null; - } - ) - ); - in - forAllPackages ( - pkgName: - lib.filterAttrs ( - system: _do_not_touch: - pkgName == "nix-nswrapper" -> nixpkgsFor.${system}.native.stdenv.hostPlatform.isLinux - ) (forAllSystems (system: components.${system}.${pkgName})) - ); - - buildNoTests = forAllSystems (system: nixpkgsFor.${system}.native.nixComponents2.nix-cli); - - # Toggles some settings for better coverage. Windows needs these - # library combinations, and Debian build Nix with GNU readline too. - buildReadlineNoMarkdown = - let - components = forAllSystems ( - system: - nixpkgsFor.${system}.native.nixComponents2.overrideScope ( - self: super: { - nix-cmd = super.nix-cmd.override { - enableMarkdown = false; - readlineFlavor = "readline"; - }; - } - ) - ); - in - forAllPackages ( - pkgName: - lib.filterAttrs ( - system: _do_not_touch: - pkgName == "nix-nswrapper" -> nixpkgsFor.${system}.native.stdenv.hostPlatform.isLinux - ) (forAllSystems (system: components.${system}.${pkgName})) - ); - # Perl bindings for various platforms. 
perlBindings = forAllSystems (system: nixpkgsFor.${system}.native.nixComponents2.nix-perl-bindings); @@ -223,30 +136,6 @@ rec { system: nixpkgsFor.${system}.native.callPackage ./binary-tarball.nix { } ); - binaryTarballCross = lib.genAttrs [ "x86_64-linux" ] ( - system: - forAllCrossSystems ( - crossSystem: nixpkgsFor.${system}.cross.${crossSystem}.callPackage ./binary-tarball.nix { } - ) - ); - - # The first half of the installation script. This is uploaded - # to https://nixos.org/nix/install. It downloads the binary - # tarball for the user's system and calls the second half of the - # installation script. - installerScript = installScriptFor [ - # Native - self.hydraJobs.binaryTarball."x86_64-linux" - self.hydraJobs.binaryTarball."i686-linux" - self.hydraJobs.binaryTarball."aarch64-linux" - self.hydraJobs.binaryTarball."x86_64-darwin" - self.hydraJobs.binaryTarball."aarch64-darwin" - # Cross - self.hydraJobs.binaryTarballCross."x86_64-linux"."armv6l-unknown-linux-gnueabihf" - self.hydraJobs.binaryTarballCross."x86_64-linux"."armv7l-unknown-linux-gnueabihf" - self.hydraJobs.binaryTarballCross."x86_64-linux"."riscv64-unknown-linux-gnu" - ]; - installerScriptForGHA = forAllSystems ( system: nixpkgsFor.${system}.native.callPackage ./installer { @@ -314,6 +203,19 @@ rec { pkgs = nixpkgsFor.${system}.native; } ); + + nixpkgsLibTestsLazy = forAllSystems ( + system: + lib.overrideDerivation + (import (nixpkgs + "/lib/tests/test-with-nix.nix") { + lib = nixpkgsFor.${system}.native.lib; + nix = self.packages.${system}.nix-cli; + pkgs = nixpkgsFor.${system}.native; + }) + (_: { + "NIX_CONFIG" = "lazy-trees = true"; + }) + ); }; metrics.nixpkgs = import "${nixpkgs-regression}/pkgs/top-level/metrics.nix" { @@ -328,17 +230,12 @@ rec { in pkgs.runCommand "install-tests" { againstSelf = testNixVersions pkgs pkgs.nix; - againstCurrentLatest = - # FIXME: temporarily disable this on macOS because of #3605. 
- if system == "x86_64-linux" then testNixVersions pkgs pkgs.nixVersions.latest else null; + #againstCurrentLatest = + # # FIXME: temporarily disable this on macOS because of #3605. + # if system == "x86_64-linux" then testNixVersions pkgs pkgs.nixVersions.latest else null; # Disabled because the latest stable version doesn't handle # `NIX_DAEMON_SOCKET_PATH` which is required for the tests to work # againstLatestStable = testNixVersions pkgs pkgs.nixStable; } "touch $out" ); - - installerTests = import ../tests/installer { - binaryTarballs = self.hydraJobs.binaryTarball; - inherit nixpkgsFor; - }; } diff --git a/packaging/installer/default.nix b/packaging/installer/default.nix index e171f36f99f7..a8e344b496c8 100644 --- a/packaging/installer/default.nix +++ b/packaging/installer/default.nix @@ -32,7 +32,7 @@ runCommand "installer-script" in '' \ - --replace '@tarballHash_${system}@' $(nix --experimental-features nix-command hash-file --base16 --type sha256 ${tarball}/*.tar.xz) \ + --replace '@tarballHash_${system}@' $(nix hash-file --base16 --type sha256 ${tarball}/*.tar.xz) \ --replace '@tarballPath_${system}@' $(tarballPath ${tarball}/*.tar.xz) \ '' ) tarballs diff --git a/packaging/sentry-native.nix b/packaging/sentry-native.nix new file mode 100644 index 000000000000..0e21e9be55c2 --- /dev/null +++ b/packaging/sentry-native.nix @@ -0,0 +1,54 @@ +{ + lib, + stdenv, + fetchgit, + cmake, + curl, + pkg-config, + python3, + darwin, +}: + +stdenv.mkDerivation rec { + pname = "sentry-native"; + version = "0.13.5"; + + src = fetchgit { + url = "https://github.com/getsentry/sentry-native"; + tag = version; + hash = "sha256-vDBI6lB1DMLleAgRCfsHvTSdtmXOzvJSaNAt+NwOd3c="; + fetchSubmodules = true; + }; + + dontFixCmake = true; + + nativeBuildInputs = [ + cmake + pkg-config + ] + ++ lib.optionals stdenv.hostPlatform.isDarwin [ + python3 + darwin.bootstrap_cmds + ]; + + postPatch = '' + # Borrowed from psutil: stick to the old SDK name for now. 
+ substituteInPlace external/crashpad/util/mac/mac_util.cc \ + --replace-fail kIOMainPortDefault kIOMasterPortDefault + ''; + + buildInputs = [ + curl + ]; + + cmakeBuildType = "RelWithDebInfo"; + + cmakeFlags = [ ]; + + outputs = [ + "out" + "dev" + ]; + + separateDebugInfo = true; +} diff --git a/packaging/wasmtime.nix b/packaging/wasmtime.nix new file mode 100644 index 000000000000..d2c2b95f6072 --- /dev/null +++ b/packaging/wasmtime.nix @@ -0,0 +1,74 @@ +# Stripped-down version of https://github.com/NixOS/nixpkgs/blob/master/pkgs/by-name/wa/wasmtime/package.nix, +# license: https://github.com/NixOS/nixpkgs/blob/master/COPYING +{ + lib, + stdenv, + rust, + fetchFromGitHub, + cmake, + enableShared ? !stdenv.hostPlatform.isStatic, + enableStatic ? stdenv.hostPlatform.isStatic, +}: +rust.packages.stable.rustPlatform.buildRustPackage (finalAttrs: { + pname = "wasmtime"; + version = "40.0.2"; + + src = fetchFromGitHub { + owner = "bytecodealliance"; + repo = "wasmtime"; + tag = "v${finalAttrs.version}"; + hash = "sha256-4y9WpCdyuF/Tp2k/1d5rZxwYunWNdeibEsFgHcBC52Q="; + fetchSubmodules = true; + }; + + # Disable cargo-auditable until https://github.com/rust-secure-code/cargo-auditable/issues/124 is solved. + auditable = false; + + cargoHash = "sha256-aTPgnuBvOIqg1+Sa2ZLdMTLujm8dKGK5xpZ3qHpr3f8="; + cargoBuildFlags = [ + "--package" + "wasmtime-c-api" + "--no-default-features" + "--features cranelift,wasi,pooling-allocator,wat,demangle,gc-null" + ]; + + outputs = [ + "out" + "lib" + ]; + + nativeBuildInputs = [ + cmake + ]; + + doCheck = + with stdenv.buildPlatform; + # SIMD tests are only executed on platforms that support all + # required processor features (e.g. 
SSE3, SSSE3 and SSE4.1 on x86_64): + # https://github.com/bytecodealliance/wasmtime/blob/v9.0.0/cranelift/codegen/src/isa/x64/mod.rs#L220 + (isx86_64 -> sse3Support && ssse3Support && sse4_1Support) + && + # The dependency `wasi-preview1-component-adapter` fails to build because of: + # error: linker `rust-lld` not found + !isAarch64; + + postInstall = + let + inherit (stdenv.hostPlatform.rust) cargoShortTarget; + in + '' + moveToOutput lib $lib + ${lib.optionalString (!enableShared) "rm -f $lib/lib/*.so{,.*}"} + ${lib.optionalString (!enableStatic) "rm -f $lib/lib/*.a"} + + # copy the build.rs generated c-api headers + # https://github.com/rust-lang/cargo/issues/9661 + mkdir -p $out + cp -r target/${cargoShortTarget}/release/build/wasmtime-c-api-impl-*/out/include $out/include + '' + + lib.optionalString stdenv.hostPlatform.isDarwin '' + install_name_tool -id \ + $lib/lib/libwasmtime.dylib \ + $lib/lib/libwasmtime.dylib + ''; +}) diff --git a/shell.nix b/shell.nix deleted file mode 100644 index 918f4bbd9e9e..000000000000 --- a/shell.nix +++ /dev/null @@ -1,3 +0,0 @@ -(import (fetchTarball "https://github.com/edolstra/flake-compat/archive/master.tar.gz") { - src = ./.; -}).shellNix diff --git a/src/external-api-docs/package.nix b/src/external-api-docs/package.nix index b194e16d4608..28cde8c09e69 100644 --- a/src/external-api-docs/package.nix +++ b/src/external-api-docs/package.nix @@ -14,7 +14,7 @@ let in mkMesonDerivation (finalAttrs: { - pname = "nix-external-api-docs"; + pname = "determinate-nix-external-api-docs"; inherit version; workDir = ./.; diff --git a/src/internal-api-docs/package.nix b/src/internal-api-docs/package.nix index 6c4f354aee5c..636c19653eab 100644 --- a/src/internal-api-docs/package.nix +++ b/src/internal-api-docs/package.nix @@ -14,7 +14,7 @@ let in mkMesonDerivation (finalAttrs: { - pname = "nix-internal-api-docs"; + pname = "determinate-nix-internal-api-docs"; inherit version; workDir = ./.; diff --git a/src/libcmd/built-path.cc 
b/src/libcmd/built-path.cc index fc7f1849384c..fccd3997e16b 100644 --- a/src/libcmd/built-path.cc +++ b/src/libcmd/built-path.cc @@ -119,9 +119,10 @@ RealisedPath::Set BuiltPath::toRealisedPaths(Store & store) const outputName); DrvOutput key{*drvOutput, outputName}; auto thisRealisation = store.queryRealisation(key); - assert(thisRealisation); // We’ve built it, so we must - // have the realisation - res.insert(Realisation{*thisRealisation, std::move(key)}); + if (thisRealisation) + res.insert(Realisation{*thisRealisation, std::move(key)}); + else + res.insert(outputPath); } else { res.insert(outputPath); } diff --git a/src/libcmd/builtin-flake-schemas.nix b/src/libcmd/builtin-flake-schemas.nix new file mode 100644 index 000000000000..39f09d92fa29 --- /dev/null +++ b/src/libcmd/builtin-flake-schemas.nix @@ -0,0 +1,488 @@ +{ + description = "Schemas for well-known Nix flake output types"; + + outputs = + { self }: + let + mapAttrsToList = f: attrs: map (name: f name attrs.${name}) (builtins.attrNames attrs); + + checkModule = + module_: + let + module = if builtins.isPath module_ then import module_ else module_; + in + builtins.isAttrs module || builtins.isFunction module; + + mkApp = system: app: { + forSystems = [ system ]; + evalChecks.isValidApp = + app ? type + && app.type == "app" + && app ? program + && builtins.isString app.program + && + builtins.removeAttrs app [ + "type" + "program" + "meta" + ] == { }; + what = "app"; + shortDescription = app.meta.description or ""; + }; + + mkPackage = isFlakeCheck: what: system: package: { + forSystems = [ system ]; + shortDescription = package.meta.description or ""; + derivationAttrPath = [ ]; + inherit what isFlakeCheck; + }; + + singleDerivationInventory = + what: isFlakeCheck: output: + self.lib.mkChildren (builtins.mapAttrs (mkPackage isFlakeCheck what) output); + + schemasSchema = { + version = 1; + doc = '' + The `schemas` flake output is used to define and document flake outputs. 
+ For the expected format, consult the Nix manual. + ''; + inventory = + output: + self.lib.mkChildren ( + builtins.mapAttrs (schemaName: schemaDef: { + shortDescription = "A schema checker for the `${schemaName}` flake output"; + evalChecks.isValidSchema = + schemaDef.version or 0 == 1 + && schemaDef ? doc + && builtins.isString (schemaDef.doc) + && schemaDef ? inventory + && builtins.isFunction (schemaDef.inventory); + what = "flake schema"; + }) output + ); + }; + + appsSchema = { + version = 1; + doc = '' + The `apps` output provides commands available via `nix run`. + ''; + roles.nix-run = { }; + appendSystem = true; + defaultAttrPath = [ "default" ]; + inventory = + output: + self.lib.mkChildren ( + builtins.mapAttrs (system: apps: { + forSystems = [ system ]; + children = builtins.mapAttrs (appName: app: mkApp system app) apps; + }) output + ); + }; + + defaultAppSchema = { + version = 1; + doc = '' + **DEPRECATED**. Use `apps..default` instead. + ''; + roles.nix-run = { }; + appendSystem = true; + defaultAttrPath = [ ]; + inventory = output: self.lib.mkChildren (builtins.mapAttrs mkApp output); + }; + + packagesSchema = { + version = 1; + doc = '' + The `packages` flake output contains packages that can be added to a shell using `nix shell`. + ''; + roles.nix-build = { }; + roles.nix-run = { }; + roles.nix-develop = { }; + roles.nix-search = { }; + appendSystem = true; + defaultAttrPath = [ "default" ]; + inventory = self.lib.derivationsInventory "package" false; + }; + + defaultPackageSchema = { + version = 1; + doc = '' + **DEPRECATED**. Use `packages..default` instead. + ''; + roles.nix-build = { }; + roles.nix-run = { }; + roles.nix-develop = { }; + roles.nix-search = { }; + appendSystem = true; + defaultAttrPath = [ ]; + inventory = singleDerivationInventory "package" false; + }; + + ociImagesSchema = { + version = 1; + doc = '' + The `ociImages` flake output contains derivations that build valid Open Container Initiative images. 
+ ''; + inventory = self.lib.derivationsInventory "OCI image" false; + }; + + legacyPackagesSchema = { + version = 1; + doc = '' + The `legacyPackages` flake output is similar to `packages` but different in that it can be nested and thus contain attribute sets that contain more packages. + Since enumerating packages in nested attribute sets can be inefficient, you should favor `packages` over `legacyPackages`. + ''; + roles.nix-build = { }; + roles.nix-run = { }; + roles.nix-search = { }; + roles.nix-develop = { }; + appendSystem = true; + inventory = + output: + self.lib.mkChildren ( + builtins.mapAttrs (systemType: packagesForSystem: { + forSystems = [ systemType ]; + isLegacy = true; + children = + let + recurse = + prefix: attrs: + builtins.mapAttrs ( + attrName: attrs: + # Necessary to deal with `AAAAAASomeThingsFailToEvaluate` etc. in Nixpkgs. + self.lib.try ( + if attrs.type or null == "derivation" then + { + forSystems = [ attrs.system ]; + shortDescription = attrs.meta.description or ""; + derivationAttrPath = [ ]; + what = "package"; + } + else + # Recurse at the first and second levels, or if the + # recurseForDerivations attribute if set. + if attrs.recurseForDerivations or false then + { + children = recurse (prefix + attrName + ".") attrs; + } + else + { + what = "unknown"; + } + ) (throw "failed") + ) attrs; + in + # The top-level cannot be a derivation. + assert packagesForSystem.type or null != "derivation"; + recurse (systemType + ".") packagesForSystem; + }) output + ); + }; + + checksSchema = { + version = 1; + doc = '' + The `checks` flake output contains derivations that will be built by `nix flake check`. + ''; + # FIXME: add role + inventory = self.lib.derivationsInventory "CI test" true; + }; + + devShellsSchema = { + version = 1; + doc = '' + The `devShells` flake output contains derivations that provide a development environment for `nix develop`. 
+ ''; + roles.nix-develop = { }; + appendSystem = true; + defaultAttrPath = [ "default" ]; + inventory = self.lib.derivationsInventory "development environment" false; + }; + + devShellSchema = { + version = 1; + doc = '' + **DEPRECATED**. Use `devShells..default` instead. + ''; + roles.nix-develop = { }; + appendSystem = true; + defaultAttrPath = [ ]; + inventory = singleDerivationInventory "development environment" false; + }; + + formatterSchema = { + version = 1; + doc = '' + The `formatter` output specifies the package to use to format the project. + ''; + roles.nix-fmt = { }; + appendSystem = true; + defaultAttrPath = [ ]; + inventory = + output: + self.lib.mkChildren ( + builtins.mapAttrs (system: formatter: { + forSystems = [ system ]; + shortDescription = formatter.meta.description or ""; + derivationAttrPath = [ ]; + what = "formatter"; + isFlakeCheck = false; + }) output + ); + }; + + templatesSchema = { + version = 1; + doc = '' + The `templates` output provides project templates. + ''; + roles.nix-template = { }; + defaultAttrPath = [ "default" ]; + inventory = + output: + self.lib.mkChildren ( + builtins.mapAttrs (templateName: template: { + shortDescription = template.description or ""; + evalChecks.isValidTemplate = + template ? path + && builtins.isPath template.path + && template ? description + && builtins.isString template.description; + what = "template"; + }) output + ); + }; + + hydraJobsSchema = { + version = 1; + doc = '' + The `hydraJobs` flake output defines derivations to be built by the Hydra continuous integration system. 
+ ''; + allowIFD = false; + inventory = + output: + let + recurse = + prefix: attrs: + self.lib.mkChildren ( + builtins.mapAttrs ( + attrName: attrs: + if attrs.type or null == "derivation" then + { + forSystems = [ attrs.system ]; + shortDescription = attrs.meta.description or ""; + derivationAttrPath = [ ]; + what = "Hydra CI test"; + } + else + recurse (prefix + attrName + ".") attrs + ) attrs + ); + in + # The top-level cannot be a derivation. + assert output.type or null != "derivation"; + recurse "" output; + }; + + overlaysSchema = { + version = 1; + doc = '' + The `overlays` flake output defines ["overlays"](https://nixos.org/manual/nixpkgs/stable/#chap-overlays) that can be plugged into Nixpkgs. + Overlays add additional packages or modify or replace existing packages. + ''; + inventory = + output: + self.lib.mkChildren ( + builtins.mapAttrs (overlayName: overlay: { + what = "Nixpkgs overlay"; + evalChecks.isOverlay = + # FIXME: should try to apply the overlay to an actual + # Nixpkgs. But we don't have access to a nixpkgs + # flake here. Maybe this schema should be moved to the + # nixpkgs flake, where it does have access. + if !builtins.isFunction overlay then + throw "Overlay is not a function. It should be structured like: `final: previous: { /* ... */ }`." + else + true; + }) output + ); + }; + + nixosConfigurationsSchema = { + version = 1; + doc = '' + The `nixosConfigurations` flake output defines [NixOS system configurations](https://nixos.org/manual/nixos/stable/#ch-configuration). + ''; + inventory = + output: + self.lib.mkChildren ( + builtins.mapAttrs (configName: machine: { + what = "NixOS configuration"; + derivationAttrPath = [ + "config" + "system" + "build" + "toplevel" + ]; + forSystems = [ machine.pkgs.stdenv.system ]; + }) output + ); + }; + + nixosModulesSchema = { + version = 1; + doc = '' + The `nixosModules` flake output defines importable [NixOS modules](https://nixos.org/manual/nixos/stable/#sec-writing-modules). 
+ ''; + inventory = + output: + self.lib.mkChildren ( + builtins.mapAttrs (moduleName: module: { + what = "NixOS module"; + evalChecks.isFunctionOrAttrs = checkModule module; + }) output + ); + }; + + homeConfigurationsSchema = { + version = 1; + doc = '' + The `homeConfigurations` flake output defines [Home Manager configurations](https://github.com/nix-community/home-manager). + ''; + inventory = + output: + self.lib.mkChildren ( + builtins.mapAttrs (configName: this: { + what = "Home Manager configuration"; + derivationAttrPath = [ "activationPackage" ]; + forSystems = [ this.activationPackage.system ]; + }) output + ); + }; + + homeModulesSchema = { + version = 1; + doc = '' + The `homeModules` flake output defines importable [Home Manager](https://github.com/nix-community/home-manager) modules. + ''; + inventory = + output: + self.lib.mkChildren ( + builtins.mapAttrs (moduleName: module: { + what = "Home Manager module"; + evalChecks.isFunctionOrAttrs = checkModule module; + }) output + ); + }; + + darwinConfigurationsSchema = { + version = 1; + doc = '' + The `darwinConfigurations` flake output defines [nix-darwin configurations](https://github.com/nix-darwin/nix-darwin). + ''; + inventory = + output: + self.lib.mkChildren ( + builtins.mapAttrs (configName: this: { + what = "nix-darwin configuration"; + derivationAttrPath = [ "system" ]; + forSystems = [ this.system.system ]; + }) output + ); + }; + + darwinModulesSchema = { + version = 1; + doc = '' + The `darwinModules` flake output defines importable [nix-darwin modules](https://github.com/nix-darwin/nix-darwin). 
+ ''; + inventory = + output: + self.lib.mkChildren ( + builtins.mapAttrs (moduleName: module: { + what = "nix-darwin module"; + evalChecks.isFunctionOrAttrs = checkModule module; + }) output + ); + }; + + bundlersSchema = { + version = 1; + doc = '' + The `bundlers` flake output defines ["bundlers"](https://nix.dev/manual/nix/latest/command-ref/new-cli/nix3-bundle) that transform derivation outputs into other formats, typically self-extracting executables or container images. + ''; + roles.nix-bundler = { }; + appendSystem = true; + defaultAttrPath = [ "default" ]; + inventory = + output: + self.lib.mkChildren ( + builtins.mapAttrs ( + system: bundlers: + let + forSystems = [ system ]; + in + { + inherit forSystems; + children = builtins.mapAttrs (bundlerName: bundler: { + inherit forSystems; + evalChecks.isValidBundler = builtins.isFunction bundler; + what = "bundler"; + }) bundlers; + } + ) output + ); + }; + + in + + { + # Helper functions + lib = { + try = + e: default: + let + res = builtins.tryEval e; + in + if res.success then res.value else default; + + mkChildren = children: { inherit children; }; + + derivationsInventory = + what: isFlakeCheck: output: + self.lib.mkChildren ( + builtins.mapAttrs (systemType: packagesForSystem: { + forSystems = [ systemType ]; + children = builtins.mapAttrs ( + packageName: mkPackage isFlakeCheck what systemType + ) packagesForSystem; + }) output + ); + }; + + # FIXME: distinguish between available and active schemas? 
+ schemas.schemas = schemasSchema; + schemas.apps = appsSchema; + schemas.defaultApp = defaultAppSchema; + schemas.packages = packagesSchema; + schemas.defaultPackage = defaultPackageSchema; + schemas.legacyPackages = legacyPackagesSchema; + schemas.checks = checksSchema; + schemas.devShells = devShellsSchema; + schemas.devShell = devShellSchema; + schemas.formatter = formatterSchema; + schemas.templates = templatesSchema; + schemas.hydraJobs = hydraJobsSchema; + schemas.overlays = overlaysSchema; + schemas.nixosConfigurations = nixosConfigurationsSchema; + schemas.nixosModules = nixosModulesSchema; + schemas.homeConfigurations = homeConfigurationsSchema; + schemas.homeModules = homeModulesSchema; + schemas.darwinConfigurations = darwinConfigurationsSchema; + schemas.darwinModules = darwinModulesSchema; + schemas.ociImages = ociImagesSchema; + schemas.bundlers = bundlersSchema; + }; +} diff --git a/src/libcmd/call-flake-schemas.nix b/src/libcmd/call-flake-schemas.nix new file mode 100644 index 000000000000..f1604259aef7 --- /dev/null +++ b/src/libcmd/call-flake-schemas.nix @@ -0,0 +1,38 @@ +# The flake providing default schemas. +defaultSchemasFlake: + +# The flake whose contents we want to extract. +flake: + +let + + # Helper functions. + + mapAttrsToList = f: attrs: map (name: f name attrs.${name}) (builtins.attrNames attrs); + + outputNames = builtins.attrNames flake.outputs; + + schemas = flake.outputs.schemas or defaultSchemasFlake.schemas; + +in + +{ + outputs = flake.outputs; + + inventory = builtins.mapAttrs ( + outputName: _: + if schemas ? ${outputName} && schemas.${outputName}.version == 1 then + schemas.${outputName} + // ( + if flake.outputs ? 
${outputName} then + { + output = schemas.${outputName}.inventory flake.outputs.${outputName}; + } + else + { + } + ) + else + { unknown = true; } + ) (schemas // flake.outputs); +} diff --git a/src/libcmd/command.cc b/src/libcmd/command.cc index d57b76411328..f61d38e4a3ea 100644 --- a/src/libcmd/command.cc +++ b/src/libcmd/command.cc @@ -166,6 +166,13 @@ ref EvalCommand::getEvalStore() ref EvalCommand::getEvalState() { if (!evalState) { + if (startReplOnEvalErrors && evalSettings.evalCores != 1U) { + // Disable parallel eval if the debugger is enabled, since + // they're incompatible at the moment. + warn("using the debugger disables multi-threaded evaluation"); + evalSettings.evalCores = 1; + } + evalState = std::allocate_shared( traceable_allocator(), lookupPath, getEvalStore(), fetchSettings, evalSettings, getStore()); diff --git a/src/libcmd/common-eval-args.cc b/src/libcmd/common-eval-args.cc index 7e4bd7a162a3..3b0e07b2b1fa 100644 --- a/src/libcmd/common-eval-args.cc +++ b/src/libcmd/common-eval-args.cc @@ -19,17 +19,12 @@ namespace nix { -fetchers::Settings fetchSettings; - -static GlobalConfig::Register rFetchSettings(&fetchSettings); - EvalSettings evalSettings{ settings.readOnlyMode, { { "flake", [](EvalState & state, std::string_view rest) { - experimentalFeatureSettings.require(Xp::Flakes); // FIXME `parseFlakeRef` should take a `std::string_view`. 
auto flakeRef = parseFlakeRef(fetchSettings, std::string{rest}, {}, true, false); debug("fetching flake search path element '%s''", rest); @@ -186,7 +181,6 @@ SourcePath lookupFileArg(EvalState & state, std::string_view s, const std::files } else if (hasPrefix(s, "flake:")) { - experimentalFeatureSettings.require(Xp::Flakes); auto flakeRef = parseFlakeRef(fetchSettings, std::string(s.substr(6)), {}, true, false); auto [accessor, lockedRef] = flakeRef.resolve(fetchSettings, *state.store).lazyFetch(fetchSettings, *state.store); diff --git a/src/libcmd/flake-schemas.cc b/src/libcmd/flake-schemas.cc new file mode 100644 index 000000000000..c8e2992be7e2 --- /dev/null +++ b/src/libcmd/flake-schemas.cc @@ -0,0 +1,387 @@ +#include "nix/cmd/flake-schemas.hh" +#include "nix/expr/eval-settings.hh" +#include "nix/fetchers/fetch-to-store.hh" +#include "nix/util/memory-source-accessor.hh" +#include "nix/util/mounted-source-accessor.hh" +#include "nix/flake/provenance.hh" + +namespace nix::flake_schemas { + +using namespace eval_cache; +using namespace flake; + +static LockedFlake getBuiltinDefaultSchemasFlake(EvalState & state) +{ + auto accessor = make_ref(); + + accessor->setPathDisplay("«builtin-flake-schemas»"); + + accessor->addFile( + CanonPath("flake.nix"), +#include "builtin-flake-schemas.nix.gen.hh" + ); + + auto [storePath, narHash] = state.store->computeStorePath("source", {accessor}); + + state.allowPath(storePath); // FIXME: should just whitelist the entire virtual store + + state.storeFS->mount(CanonPath(state.store->printStorePath(storePath)), accessor); + + // Construct a dummy flakeref. 
+ auto flakeRef = parseFlakeRef( + fetchSettings, + fmt("tarball+https://builtin-flake-schemas?narHash=%s", narHash.to_string(HashFormat::SRI, true))); + + auto flake = readFlake(state, flakeRef, flakeRef, flakeRef, state.storePath(storePath), {}); + + return lockFlake(flakeSettings, state, flakeRef, {}, flake); +} + +ref call( + EvalState & state, + std::shared_ptr lockedFlake, + std::optional defaultSchemasFlake, + bool allowEvalCache) +{ + auto fingerprint = lockedFlake->getFingerprint(*state.store, state.fetchSettings); + + std::string callFlakeSchemasNix = +#include "call-flake-schemas.nix.gen.hh" + ; + + auto lockedDefaultSchemasFlake = defaultSchemasFlake + ? flake::lockFlake(flakeSettings, state, *defaultSchemasFlake, {}) + : getBuiltinDefaultSchemasFlake(state); + auto lockedDefaultSchemasFlakeFingerprint = + lockedDefaultSchemasFlake.getFingerprint(*state.store, state.fetchSettings); + + std::optional fingerprint2; + if (allowEvalCache && evalSettings.useEvalCache && evalSettings.pureEval && fingerprint + && lockedDefaultSchemasFlakeFingerprint) + fingerprint2 = hashString( + HashAlgorithm::SHA256, + fmt("app:%s:%s:%s", + hashString(HashAlgorithm::SHA256, callFlakeSchemasNix).to_string(HashFormat::Base16, false), + fingerprint->to_string(HashFormat::Base16, false), + lockedDefaultSchemasFlakeFingerprint->to_string(HashFormat::Base16, false))); + + if (fingerprint2) { + auto i = state.evalCaches.find(*fingerprint2); + if (i != state.evalCaches.end()) + return i->second; + } + + auto cache = make_ref( + fingerprint2, state, [&state, lockedFlake, callFlakeSchemasNix, lockedDefaultSchemasFlake]() { + auto vCallFlakeSchemas = state.allocValue(); + state.eval( + state.parseExprFromString(callFlakeSchemasNix, state.rootPath(CanonPath::root)), *vCallFlakeSchemas); + + auto vFlake = state.allocValue(); + flake::callFlake(state, *lockedFlake, *vFlake); + + auto vDefaultSchemasFlake = state.allocValue(); + if (vFlake->type() == nAttrs && 
vFlake->attrs()->get(state.symbols.create("schemas"))) + vDefaultSchemasFlake->mkNull(); + else + flake::callFlake(state, lockedDefaultSchemasFlake, *vDefaultSchemasFlake); + + auto vRes = state.allocValue(); + Value * args[] = {vDefaultSchemasFlake, vFlake}; + state.callFunction(*vCallFlakeSchemas, args, *vRes, noPos); + + return vRes; + }); + + /* Derive the flake output attribute path from the cursor used to + traverse the inventory. We do this so we don't have to maintain + a separate attrpath for that. */ + cache->cleanupAttrPath = [&](AttrPath && attrPath) { + AttrPath res; + auto i = attrPath.begin(); + if (i == attrPath.end()) + return attrPath; + + if (state.symbols[*i] == "inventory") { + ++i; + if (i != attrPath.end()) { + res.push_back(*i++); // copy output name + if (i != attrPath.end()) + ++i; // skip "outputs" + while (i != attrPath.end()) { + ++i; // skip "children" + if (i != attrPath.end()) + res.push_back(*i++); + } + } + } + + else if (state.symbols[*i] == "outputs") { + res.insert(res.begin(), ++i, attrPath.end()); + } + + else + abort(); + + return res; + }; + + if (fingerprint2) + state.evalCaches.emplace(*fingerprint2, cache); + + return cache; +} + +void forEachOutput( + ref inventory, + std::function output, const std::string & doc, bool isLast)> f) +{ + auto outputNames = inventory->getAttrs(); + + auto doOutputs = [&](bool allowIFD) { + evalSettings.enableImportFromDerivation.setDefault(allowIFD); + for (const auto & [i, outputName] : enumerate(outputNames)) { + auto outputInfo = inventory->getAttr(outputName); + try { + auto allowIFDAttr = outputInfo->maybeGetAttr("allowIFD"); + if (allowIFD != (!allowIFDAttr || allowIFDAttr->getBool())) + continue; + auto isUnknown = (bool) outputInfo->maybeGetAttr("unknown"); + auto output = outputInfo->maybeGetAttr("output"); + if (!output && !isUnknown) + // We have a schema but no corresponding output, so skip this. 
+ continue; + Activity act(*logger, lvlInfo, actUnknown, fmt("evaluating '%s'", outputInfo->getAttrPathStr())); + f(outputName, + isUnknown ? std::shared_ptr() : output, + isUnknown ? "" : outputInfo->getAttr("doc")->getString(), + i + 1 == outputNames.size()); + } catch (Error & e) { + e.addTrace(nullptr, "while evaluating the flake output '%s':", outputInfo->getAttrPathStr()); + throw; + } + } + }; + + // Do outputs that disallow import-from-derivation first. That way, they can't depend on outputs that do allow it. + doOutputs(false); + doOutputs(true); +} + +void visit( + std::optional system, + bool includeLegacy, + ref node, + std::shared_ptr provenance, + std::function visitLeaf, + std::function)> visitNonLeaf, + std::function node, const std::vector & systems)> visitFiltered, + std::function node)> visitLegacy) +{ + Activity act(*logger, lvlInfo, actUnknown, fmt("evaluating '%s'", node->getAttrPathStr())); + + PushProvenance pushedProvenance( + node->root->state, + provenance ? std::make_shared(provenance, node->getAttrPathStr(), evalSettings.pureEval) + : nullptr); + + /* Filter out legacy outputs, unless --legacy is enabled. */ + if (!includeLegacy) { + if (auto b = node->maybeGetAttr("isLegacy"); b && b->getBool()) { + visitLegacy(node); + return; + } + } + + /* Apply the system type filter. */ + if (system) { + if (auto forSystems = Node(node).forSystems()) { + if (std::find(forSystems->begin(), forSystems->end(), *system) == forSystems->end()) { + visitFiltered(node, *forSystems); + return; + } + } + } + + if (auto children = node->maybeGetAttr("children")) { + visitNonLeaf([&](ForEachChild f) { + auto attrNames = children->getAttrs(); + for (const auto & [i, attrName] : enumerate(attrNames)) { + try { + f(attrName, children->getAttr(attrName), i + 1 == attrNames.size()); + } catch (Error & e) { + // FIXME: use the `isLegacy` attribute. 
+ if (node->root->state.symbols[node->getAttrPath()[0]] != "legacyPackages") { + e.addTrace( + nullptr, "while evaluating the flake output attribute '%s':", node->getAttrPathStr()); + throw; + } + } + } + }); + } + + else + visitLeaf(Leaf(node)); +} + +std::optional> Node::forSystems() const +{ + if (auto forSystems = node->maybeGetAttr("forSystems")) + return forSystems->getListOfStrings(); + else + return std::nullopt; +} + +ref Node::getOutput(const ref & outputs) const +{ + auto res = outputs->findAlongAttrPath(node->getAttrPath()); + if (!res) + throw Error("flake output '%s' should exist according to its schema, but it doesn't", node->getAttrPathStr()); + return *res; +} + +std::optional Leaf::what() const +{ + if (auto what = node->maybeGetAttr("what")) + return what->getString(); + else + return std::nullopt; +} + +std::optional Leaf::shortDescription() const +{ + if (auto what = node->maybeGetAttr("shortDescription")) + return what->getString(); + return std::nullopt; +} + +std::optional Leaf::derivationAttrPath() const +{ + auto n = node->maybeGetAttr("derivationAttrPath"); + if (!n) + return std::nullopt; + return AttrPath::fromStrings(node->root->state, n->getListOfStrings()); +} + +std::shared_ptr Leaf::derivation(const ref & outputs) const +{ + auto path = derivationAttrPath(); + if (!path) { + auto n = node->maybeGetAttr("derivation"); + if (n) + warn( + "Flake output '%s' has a schema that uses the deprecated 'derivation' attribute instead of 'derivationAttrPath'. " + "Please update the schema to use 'derivationAttrPath' instead. 
" + "You may want to upgrade to version 0.3.0 or higher of https://github.com/DeterminateSystems/flake-schemas.", + node->getAttrPathStr()); + return n; + } + auto drv = getOutput(outputs)->findAlongAttrPath(*path); + if (!drv) + throw Error( + "flake output '%s' does not have a derivation attribute '%s'", + node->getAttrPathStr(), + path->to_string(node->root->state)); + return *drv; +} + +bool Leaf::isFlakeCheck() const +{ + auto isFlakeCheck = node->maybeGetAttr("isFlakeCheck"); + return isFlakeCheck && isFlakeCheck->getBool(); +} + +std::optional getOutputInfo(ref inventory, AttrPath attrPath) +{ + if (attrPath.empty()) + return std::nullopt; + + auto outputName = attrPath.front(); + + auto schemaInfo = inventory->maybeGetAttr(outputName); + if (!schemaInfo) + return std::nullopt; + + auto node = schemaInfo->maybeGetAttr("output"); + if (!node) + return std::nullopt; + + auto pathLeft = std::span(attrPath).subspan(1); + + while (!pathLeft.empty()) { + auto children = node->maybeGetAttr("children"); + if (!children) + break; + auto attr = pathLeft.front(); + node = children->maybeGetAttr(attr); + if (!node) + return std::nullopt; + pathLeft = pathLeft.subspan(1); + } + + return OutputInfo{ + .schemaInfo = ref(schemaInfo), + .nodeInfo = ref(node), + .leafAttrPath = AttrPath(pathLeft.begin(), pathLeft.end()), + }; +} + +Schemas getSchemas(ref inventory) +{ + auto & state(inventory->root->state); + + Schemas schemas; + + for (auto & schemaName : inventory->getAttrs()) { + auto schema = inventory->getAttr(schemaName); + + SchemaInfo schemaInfo; + + if (auto roles = schema->maybeGetAttr("roles")) { + for (auto & roleName : roles->getAttrs()) { + schemaInfo.roles.insert(std::string(state.symbols[roleName])); + } + } + + if (auto appendSystem = schema->maybeGetAttr("appendSystem")) + schemaInfo.appendSystem = appendSystem->getBool(); + + if (auto defaultAttrPath = schema->maybeGetAttr("defaultAttrPath")) { + AttrPath attrPath; + for (auto & s : 
defaultAttrPath->getListOfStrings()) + attrPath.push_back(state.symbols.create(s)); + schemaInfo.defaultAttrPath = std::move(attrPath); + } + + schemas.insert_or_assign(std::string(state.symbols[schemaName]), std::move(schemaInfo)); + } + + return schemas; +} + +} // namespace nix::flake_schemas + +namespace nix { + +MixFlakeSchemas::MixFlakeSchemas() +{ + addFlag( + {.longName = "default-flake-schemas", + .description = "The URL of the flake providing default flake schema definitions.", + .labels = {"flake-ref"}, + .handler = {&defaultFlakeSchemas}, + .completer = {[&](AddCompletions & completions, size_t, std::string_view prefix) { + completeFlakeRef(completions, getStore(), prefix); + }}}); +} + +std::optional MixFlakeSchemas::getDefaultFlakeSchemas() +{ + if (!defaultFlakeSchemas) + return std::nullopt; + else + return parseFlakeRef(fetchSettings, *defaultFlakeSchemas, absPath(getCommandBaseDir())); +} + +} // namespace nix diff --git a/src/libcmd/include/nix/cmd/command.hh b/src/libcmd/include/nix/cmd/command.hh index fc67a60b5b8a..cfbd9eeec96e 100644 --- a/src/libcmd/include/nix/cmd/command.hh +++ b/src/libcmd/include/nix/cmd/command.hh @@ -148,7 +148,16 @@ struct MixFlakeOptions : virtual Args, EvalCommand } }; -struct SourceExprCommand : virtual Args, MixFlakeOptions +struct MixFlakeSchemas : virtual Args, virtual StoreCommand +{ + std::optional defaultFlakeSchemas; + + MixFlakeSchemas(); + + std::optional getDefaultFlakeSchemas(); +}; + +struct SourceExprCommand : virtual Args, MixFlakeOptions, MixFlakeSchemas { std::optional file; std::optional expr; @@ -159,9 +168,13 @@ struct SourceExprCommand : virtual Args, MixFlakeOptions ref parseInstallable(ref store, const std::string & installable); - virtual Strings getDefaultFlakeAttrPaths(); - - virtual Strings getDefaultFlakeAttrPathPrefixes(); + /** + * Return a set of "roles" that this command implements + * (e.g. `nix-build` or `nix-develop`). 
This is used by flake + * schemas to determine which flake outputs are used as default + * attrpath prefixes. + */ + virtual StringSet getRoles(); /** * Complete an installable from the given prefix. @@ -230,6 +243,8 @@ struct InstallableCommand : virtual Args, SourceExprCommand { InstallableCommand(); + virtual void preRun(ref store); + virtual void run(ref store, ref installable) = 0; void run(ref store) override; @@ -388,8 +403,7 @@ void completeFlakeRefWithFragment( AddCompletions & completions, ref evalState, flake::LockFlags lockFlags, - Strings attrPathPrefixes, - const Strings & defaultFlakeAttrPaths, + const StringSet & roles, std::string_view prefix); std::string showVersions(const StringSet & versions); diff --git a/src/libcmd/include/nix/cmd/common-eval-args.hh b/src/libcmd/include/nix/cmd/common-eval-args.hh index 14897158ae6d..93ffbd435ef9 100644 --- a/src/libcmd/include/nix/cmd/common-eval-args.hh +++ b/src/libcmd/include/nix/cmd/common-eval-args.hh @@ -26,9 +26,6 @@ namespace flake { struct Settings; } -/** - * @todo Get rid of global settings variables - */ extern fetchers::Settings fetchSettings; /** diff --git a/src/libcmd/include/nix/cmd/flake-schemas.hh b/src/libcmd/include/nix/cmd/flake-schemas.hh new file mode 100644 index 000000000000..bfc2a38cdef0 --- /dev/null +++ b/src/libcmd/include/nix/cmd/flake-schemas.hh @@ -0,0 +1,96 @@ +#pragma once + +#include "nix/expr/eval-cache.hh" +#include "nix/flake/flake.hh" +#include "nix/cmd/command.hh" + +namespace nix::flake_schemas { + +using namespace eval_cache; + +ref call( + EvalState & state, + std::shared_ptr lockedFlake, + std::optional defaultSchemasFlake, + bool allowEvalCache = true); + +void forEachOutput( + ref inventory, + std::function output, const std::string & doc, bool isLast)> f); + +/** + * A convenience wrapper around `AttrCursor` for nodes in the `inventory` tree returned by call-flake-schemas.nix. 
+ */ +struct Node +{ + const ref node; + + Node(const ref & node) + : node(node) + { + } + + /** + * Return the `forSystems` attribute. This can be null, which + * means "all systems". + */ + std::optional> forSystems() const; + + /** + * Return the actual output corresponding to this info node. + */ + ref getOutput(const ref & outputs) const; +}; + +struct Leaf : Node +{ + using Node::Node; + + std::optional what() const; + + std::optional shortDescription() const; + + std::optional derivationAttrPath() const; + + /** + * Return the attribute corresponding to `derivationAttrPath`, if set. + */ + std::shared_ptr derivation(const ref & outputs) const; + + bool isFlakeCheck() const; +}; + +typedef std::function attr, bool isLast)> ForEachChild; + +void visit( + std::optional system, + bool includeLegacy, + ref node, + std::shared_ptr provenance, + std::function visitLeaf, + std::function)> visitNonLeaf, + std::function node, const std::vector & systems)> visitFiltered, + std::function node)> visitLegacy); + +struct OutputInfo +{ + ref schemaInfo; + ref nodeInfo; + AttrPath leafAttrPath; +}; + +std::optional getOutputInfo(ref inventory, AttrPath attrPath); + +struct SchemaInfo +{ + std::string doc; + StringSet roles; + bool appendSystem = false; + std::optional defaultAttrPath; +}; + +using Schemas = std::map; + +Schemas getSchemas(ref root); + +} // namespace nix::flake_schemas diff --git a/src/libcmd/include/nix/cmd/installable-attr-path.hh b/src/libcmd/include/nix/cmd/installable-attr-path.hh index 474bb358ec91..ef9dac813346 100644 --- a/src/libcmd/include/nix/cmd/installable-attr-path.hh +++ b/src/libcmd/include/nix/cmd/installable-attr-path.hh @@ -21,8 +21,6 @@ #include #include -#include - namespace nix { class InstallableAttrPath : public InstallableValue diff --git a/src/libcmd/include/nix/cmd/installable-flake.hh b/src/libcmd/include/nix/cmd/installable-flake.hh index 9f449ad48f2e..3acce913dcb7 100644 --- a/src/libcmd/include/nix/cmd/installable-flake.hh +++ 
b/src/libcmd/include/nix/cmd/installable-flake.hh @@ -36,11 +36,14 @@ struct ExtraPathInfoFlake : ExtraPathInfoValue struct InstallableFlake : InstallableValue { FlakeRef flakeRef; - Strings attrPaths; - Strings prefixes; + std::string fragment; + AttrPath parsedFragment; + StringSet roles; ExtendedOutputsSpec extendedOutputsSpec; const flake::LockFlags & lockFlags; mutable std::shared_ptr _lockedFlake; + bool useEvalCache = true; + std::optional defaultFlakeSchemas; InstallableFlake( SourceExprCommand * cmd, @@ -48,17 +51,15 @@ struct InstallableFlake : InstallableValue FlakeRef && flakeRef, std::string_view fragment, ExtendedOutputsSpec extendedOutputsSpec, - Strings attrPaths, - Strings prefixes, - const flake::LockFlags & lockFlags); + StringSet roles, + const flake::LockFlags & lockFlags, + std::optional defaultFlakeSchemas); std::string what() const override { - return flakeRef.to_string() + "#" + *attrPaths.begin(); + return flakeRef.to_string() + "#" + fragment; } - std::vector getActualAttrPaths(); - DerivedPathsWithInfo toDerivedPaths() override; std::pair toValue(EvalState & state) override; @@ -67,11 +68,23 @@ struct InstallableFlake : InstallableValue * Get a cursor to every attrpath in getActualAttrPaths() that * exists. However if none exists, throw an exception. 
*/ - std::vector> getCursors(EvalState & state) override; + std::vector> getCursors(EvalState & state, bool useDefaultAttrPath) override; + + void getCompletions(const std::string & flakeRefS, AddCompletions & completions); ref getLockedFlake() const; FlakeRef nixpkgsFlakeRef() const; + + std::shared_ptr makeProvenance(std::string_view attrPath) const; + + ref openEvalCache() const; + +private: + + mutable std::shared_ptr _evalCache; + + std::vector getAttrPaths(bool useDefaultAttrPath, ref inventory); }; /** diff --git a/src/libcmd/include/nix/cmd/installable-value.hh b/src/libcmd/include/nix/cmd/installable-value.hh index 27a1fb9815d4..09178c96c972 100644 --- a/src/libcmd/include/nix/cmd/installable-value.hh +++ b/src/libcmd/include/nix/cmd/installable-value.hh @@ -93,7 +93,7 @@ struct InstallableValue : Installable * However if none exists, throw exception instead of returning * empty vector. */ - virtual std::vector> getCursors(EvalState & state); + virtual std::vector> getCursors(EvalState & state, bool useDefaultAttrPath = true); /** * Get the first and most preferred cursor this Installable could diff --git a/src/libcmd/include/nix/cmd/installables.hh b/src/libcmd/include/nix/cmd/installables.hh index 530334e037b7..2ea35261c7fa 100644 --- a/src/libcmd/include/nix/cmd/installables.hh +++ b/src/libcmd/include/nix/cmd/installables.hh @@ -96,6 +96,22 @@ typedef std::vector DerivedPathsWithInfo; struct Installable; +struct InstallableWithBuildResult +{ + ref installable; + + using Success = BuiltPathWithResult; + + using Failure = BuildResult; // must be a `BuildResult::Failure` + + std::variant result; + + /** + * Throw an exception if this represents a failure, otherwise returns a `BuiltPathWithResult`. + */ + const BuiltPathWithResult & getSuccess() const; +}; + /** * Shorthand, for less typing and helping us keep the choice of * collection in sync. 
@@ -160,13 +176,15 @@ struct Installable const Installables & installables, BuildMode bMode = bmNormal); - static std::vector, BuiltPathWithResult>> build2( + static std::vector build2( ref evalStore, ref store, Realise mode, const Installables & installables, BuildMode bMode = bmNormal); + static void throwBuildErrors(std::vector & buildResults, const Store & store); + static std::set toStorePathSet( ref evalStore, ref store, Realise mode, OperateOn operateOn, const Installables & installables); diff --git a/src/libcmd/include/nix/cmd/meson.build b/src/libcmd/include/nix/cmd/meson.build index ac074b06ff3c..6dea1ea5be8c 100644 --- a/src/libcmd/include/nix/cmd/meson.build +++ b/src/libcmd/include/nix/cmd/meson.build @@ -9,6 +9,7 @@ headers = files( 'common-eval-args.hh', 'compatibility-settings.hh', 'editor-for.hh', + 'flake-schemas.hh', 'get-build-log.hh', 'installable-attr-path.hh', 'installable-derived-path.hh', diff --git a/src/libcmd/installable-attr-path.cc b/src/libcmd/installable-attr-path.cc index 28c3db3fc79a..3a80aa384de4 100644 --- a/src/libcmd/installable-attr-path.cc +++ b/src/libcmd/installable-attr-path.cc @@ -89,7 +89,8 @@ DerivedPathsWithInfo InstallableAttrPath::toDerivedPaths() } DerivedPathsWithInfo res; - for (auto & [drvPath, outputs] : byDrvPath) + for (auto & [drvPath, outputs] : byDrvPath) { + state->waitForPath(drvPath); res.push_back({ .path = DerivedPath::Built{ @@ -102,6 +103,7 @@ DerivedPathsWithInfo InstallableAttrPath::toDerivedPaths() so we can fill in this info. 
*/ }), }); + } return res; } diff --git a/src/libcmd/installable-flake.cc b/src/libcmd/installable-flake.cc index 77a7c8d6ec1b..28ee953b2591 100644 --- a/src/libcmd/installable-flake.cc +++ b/src/libcmd/installable-flake.cc @@ -17,6 +17,8 @@ #include "nix/util/url.hh" #include "nix/fetchers/registry.hh" #include "nix/store/build-result.hh" +#include "nix/flake/provenance.hh" +#include "nix/cmd/flake-schemas.hh" #include #include @@ -25,32 +27,14 @@ namespace nix { -std::vector InstallableFlake::getActualAttrPaths() -{ - std::vector res; - if (attrPaths.size() == 1 && attrPaths.front().starts_with(".")) { - attrPaths.front().erase(0, 1); - res.push_back(attrPaths.front()); - return res; - } - - for (auto & prefix : prefixes) - res.push_back(prefix + *attrPaths.begin()); - - for (auto & s : attrPaths) - res.push_back(s); - - return res; -} - -static std::string showAttrPaths(const std::vector & paths) +static std::string showAttrPaths(EvalState & state, const std::vector & paths) { std::string s; for (const auto & [n, i] : enumerate(paths)) { if (n > 0) s += n + 1 == paths.size() ? " or " : ", "; s += '\''; - s += i; + s += i.to_string(state); s += '\''; } return s; @@ -62,15 +46,17 @@ InstallableFlake::InstallableFlake( FlakeRef && flakeRef, std::string_view fragment, ExtendedOutputsSpec extendedOutputsSpec, - Strings attrPaths, - Strings prefixes, - const flake::LockFlags & lockFlags) + StringSet roles, + const flake::LockFlags & lockFlags, + std::optional defaultFlakeSchemas) : InstallableValue(state) , flakeRef(flakeRef) - , attrPaths(fragment == "" ? attrPaths : Strings{(std::string) fragment}) - , prefixes(fragment == "" ? 
Strings{} : prefixes) + , fragment(fragment) + , parsedFragment(AttrPath::parse(*state, fragment)) + , roles(roles) , extendedOutputsSpec(std::move(extendedOutputsSpec)) , lockFlags(lockFlags) + , defaultFlakeSchemas(defaultFlakeSchemas) { if (cmd && cmd->getAutoArgs(*state)->size()) throw UsageError("'--arg' and '--argstr' are incompatible with flakes"); @@ -84,6 +70,8 @@ DerivedPathsWithInfo InstallableFlake::toDerivedPaths() auto attrPath = attr->getAttrPathStr(); + PushProvenance pushedProvenance(*state, makeProvenance(attrPath)); + if (!attr->isDerivation()) { // FIXME: use eval cache? @@ -102,6 +90,7 @@ DerivedPathsWithInfo InstallableFlake::toDerivedPaths() } auto drvPath = attr->forceDerivation(); + state->waitForPath(drvPath); std::optional priority; @@ -157,34 +146,169 @@ std::pair InstallableFlake::toValue(EvalState & state) return {&getCursor(state)->forceValue(), noPos}; } -std::vector> InstallableFlake::getCursors(EvalState & state) +std::vector InstallableFlake::getAttrPaths(bool useDefaultAttrPath, ref inventory) { - auto evalCache = openEvalCache(state, getLockedFlake()); + if (fragment.starts_with(".")) + return {AttrPath::parse(*state, fragment.substr(1))}; + + std::vector attrPaths; + + auto schemas = flake_schemas::getSchemas(inventory); + + // FIXME: Ugly hack to preserve the historical precedence + // between outputs. We should add a way for schemas to declare + // priorities. 
+ std::vector schemasSorted; + std::set schemasSeen; + auto doSchema = [&](const std::string & schema) { + if (schemas.contains(schema)) { + schemasSorted.push_back(schema); + schemasSeen.insert(schema); + } + }; + doSchema("apps"); + doSchema("defaultApp"); + doSchema("devShells"); + doSchema("devShell"); + doSchema("packages"); + doSchema("defaultPackage"); + doSchema("legacyPackages"); + for (auto & schema : schemas) + if (!schemasSeen.contains(schema.first)) + schemasSorted.push_back(schema.first); + + for (auto & role : roles) { + for (auto & schemaName : schemasSorted) { + auto & schema = schemas.find(schemaName)->second; + if (schema.roles.contains(role)) { + AttrPath attrPath{state->symbols.create(schemaName)}; + if (schema.appendSystem) + attrPath.push_back(state->symbols.create(settings.thisSystem.get())); + + if (useDefaultAttrPath && parsedFragment.empty()) { + if (schema.defaultAttrPath) { + auto attrPath2{attrPath}; + for (auto & x : *schema.defaultAttrPath) + attrPath2.push_back(x); + attrPaths.push_back(attrPath2); + } + } else { + auto attrPath2{attrPath}; + for (auto & x : parsedFragment) + attrPath2.push_back(x); + attrPaths.push_back(attrPath2); + } + } + } + } + + if (!parsedFragment.empty()) + attrPaths.push_back(parsedFragment); + + // FIXME: compatibility hack to get `nix repl` to return all + // outputs by default. 
+ if (parsedFragment.empty() && roles.contains("nix-repl")) + attrPaths.push_back({}); + + return attrPaths; +} + +std::vector> InstallableFlake::getCursors(EvalState & state, bool useDefaultAttrPath) +{ + auto cache = openEvalCache(); + + auto inventory = cache->getRoot()->getAttr("inventory"); + auto outputs = cache->getRoot()->getAttr("outputs"); + + auto attrPaths = getAttrPaths(useDefaultAttrPath, inventory); - auto root = evalCache->getRoot(); + if (attrPaths.empty()) + throw Error( + "Flake '%s' does not have any schema that provides a default output for the role(s) %s.", + flakeRef, + concatStringsSep(", ", roles)); std::vector> res; Suggestions suggestions; - auto attrPaths = getActualAttrPaths(); for (auto & attrPath : attrPaths) { - debug("trying flake output attribute '%s'", attrPath); + debug("trying flake output attribute '%s'", attrPath.to_string(state)); - auto attr = root->findAlongAttrPath(AttrPath::parse(state, attrPath)); - if (attr) { + PushProvenance pushedProvenance(state, makeProvenance(attrPath.to_string(state))); + +#if 0 + auto outputInfo = flake_schemas::getOutputInfo(inventory, attrPath); + + if (outputInfo && outputInfo->leafAttrPath.empty()) { + if (auto drv = outputInfo->nodeInfo->maybeGetAttr("derivation")) { + res.push_back(ref(drv)); + continue; + } + } +#endif + + auto attr = outputs->findAlongAttrPath(attrPath); + if (attr) res.push_back(ref(*attr)); - } else { + else suggestions += attr.getSuggestions(); - } } if (res.size() == 0) - throw Error(suggestions, "flake '%s' does not provide attribute %s", flakeRef, showAttrPaths(attrPaths)); + throw Error(suggestions, "flake '%s' does not provide attribute %s", flakeRef, showAttrPaths(state, attrPaths)); return res; } +void InstallableFlake::getCompletions(const std::string & flakeRefS, AddCompletions & completions) +{ + auto cache = openEvalCache(); + + auto inventory = cache->getRoot()->getAttr("inventory"); + auto outputs = cache->getRoot()->getAttr("outputs"); + + if 
(fragment.ends_with(".") || fragment.empty()) + // Represent that we're looking for attributes starting with the empty prefix (i.e. all attributes inside the + // parent. + parsedFragment.push_back(state->symbols.create("")); + + auto attrPaths = getAttrPaths(true, inventory); + + if (fragment.empty()) + // Return all top-level flake outputs. + attrPaths.push_back(AttrPath{state->symbols.create("")}); + + auto lastAttr = fragment.ends_with(".") || parsedFragment.empty() ? std::string_view("") + : state->symbols[parsedFragment.back()]; + std::string prefix; + if (auto dot = fragment.rfind('.'); dot != std::string::npos) + prefix = fragment.substr(0, dot); + if (fragment.starts_with(".") && !prefix.starts_with(".")) + prefix = "." + prefix; + + for (auto attrPath : attrPaths) { + if (attrPath.empty()) + attrPath.push_back(state->symbols.create("")); + + auto attrPathParent{attrPath}; + attrPathParent.pop_back(); + + auto attr = outputs->findAlongAttrPath(attrPathParent); + if (!attr) + continue; + + for (auto & childName : (*attr)->getAttrs()) { + if (hasPrefix(state->symbols[childName], lastAttr)) { + auto attrPathChild = (*attr)->getAttrPath(childName); + completions.add( + flakeRefS + "#" + prefix + (prefix.empty() || prefix.ends_with(".") ? 
"" : ".") + + state->symbols[childName]); + } + } + } +} + ref InstallableFlake::getLockedFlake() const { if (!_lockedFlake) { @@ -197,6 +321,14 @@ ref InstallableFlake::getLockedFlake() const return ref(_lockedFlake); } +ref InstallableFlake::openEvalCache() const +{ + if (!_evalCache) { + _evalCache = flake_schemas::call(*state, getLockedFlake(), defaultFlakeSchemas, useEvalCache); + } + return ref(_evalCache); +} + FlakeRef InstallableFlake::nixpkgsFlakeRef() const { auto lockedFlake = getLockedFlake(); @@ -213,4 +345,12 @@ FlakeRef InstallableFlake::nixpkgsFlakeRef() const return defaultNixpkgsFlakeRef(); } +std::shared_ptr InstallableFlake::makeProvenance(std::string_view attrPath) const +{ + auto provenance = getLockedFlake()->flake.provenance; + if (!provenance) + return nullptr; + return std::make_shared(provenance, std::string(attrPath), evalSettings.pureEval); +} + } // namespace nix diff --git a/src/libcmd/installable-value.cc b/src/libcmd/installable-value.cc index 3a167af3db49..6c2fd60efd8e 100644 --- a/src/libcmd/installable-value.cc +++ b/src/libcmd/installable-value.cc @@ -4,7 +4,7 @@ namespace nix { -std::vector> InstallableValue::getCursors(EvalState & state) +std::vector> InstallableValue::getCursors(EvalState & state, bool useDefaultAttrPath) { auto evalCache = std::make_shared(std::nullopt, state, [&]() { return toValue(state).first; }); @@ -15,7 +15,7 @@ ref InstallableValue::getCursor(EvalState & state) { /* Although getCursors should return at least one element, in case it doesn't, bound check to avoid an undefined behavior for vector[0] */ - return getCursors(state).at(0); + return getCursors(state, true).at(0); } static UsageError nonValueInstallable(Installable & installable) @@ -55,7 +55,7 @@ InstallableValue::trySinglePathToDerivedPaths(Value & v, const PosIdx pos, std:: else if (v.type() == nString) { return {{ - .path = DerivedPath::fromSingle(state->coerceToSingleDerivedPath(pos, v, errorCtx)), + .path = 
DerivedPath::fromSingle(state->devirtualize(state->coerceToSingleDerivedPath(pos, v, errorCtx))), .info = make_ref(), }}; } diff --git a/src/libcmd/installables.cc b/src/libcmd/installables.cc index 8f8309bd96d7..b297685250b5 100644 --- a/src/libcmd/installables.cc +++ b/src/libcmd/installables.cc @@ -21,6 +21,7 @@ #include "nix/util/url.hh" #include "nix/fetchers/registry.hh" #include "nix/store/build-result.hh" +#include "nix/util/exit.hh" #include #include @@ -241,19 +242,9 @@ MixReadOnlyOption::MixReadOnlyOption() }); } -Strings SourceExprCommand::getDefaultFlakeAttrPaths() +StringSet SourceExprCommand::getRoles() { - return {"packages." + settings.thisSystem.get() + ".default", "defaultPackage." + settings.thisSystem.get()}; -} - -Strings SourceExprCommand::getDefaultFlakeAttrPathPrefixes() -{ - return {// As a convenience, look for the attribute in - // 'outputs.packages'. - "packages." + settings.thisSystem.get() + ".", - // As a temporary hack until Nixpkgs is properly converted - // to provide a clean 'packages' set, look in 'legacyPackages'. - "legacyPackages." 
+ settings.thisSystem.get() + "."}; + return {"nix-build"}; } Args::CompleterClosure SourceExprCommand::getCompleteInstallable() @@ -307,13 +298,7 @@ void SourceExprCommand::completeInstallable(AddCompletions & completions, std::s } } } else { - completeFlakeRefWithFragment( - completions, - getEvalState(), - lockFlags, - getDefaultFlakeAttrPathPrefixes(), - getDefaultFlakeAttrPaths(), - prefix); + completeFlakeRefWithFragment(completions, getEvalState(), lockFlags, getRoles(), prefix); } } catch (EvalError &) { // Don't want eval errors to mess-up with the completion engine, so let's just swallow them @@ -324,91 +309,37 @@ void completeFlakeRefWithFragment( AddCompletions & completions, ref evalState, flake::LockFlags lockFlags, - Strings attrPathPrefixes, - const Strings & defaultFlakeAttrPaths, + const StringSet & roles, std::string_view prefix) -{ - /* Look for flake output attributes that match the - prefix. */ - try { - auto hash = prefix.find('#'); - if (hash == std::string::npos) { - completeFlakeRef(completions, evalState->store, prefix); - } else { - completions.setType(AddCompletions::Type::Attrs); - - auto fragment = prefix.substr(hash + 1); - std::string prefixRoot = ""; - if (fragment.starts_with(".")) { - fragment = fragment.substr(1); - prefixRoot = "."; - } - auto flakeRefS = std::string(prefix.substr(0, hash)); - - // TODO: ideally this would use the command base directory instead of assuming ".". - auto flakeRef = - parseFlakeRef(fetchSettings, expandTilde(flakeRefS), std::filesystem::current_path().string()); - - auto evalCache = openEvalCache( - *evalState, make_ref(lockFlake(flakeSettings, *evalState, flakeRef, lockFlags))); - - auto root = evalCache->getRoot(); - - if (prefixRoot == ".") { - attrPathPrefixes.clear(); - } - /* Complete 'fragment' relative to all the - attrpath prefixes as well as the root of the - flake. 
*/ - attrPathPrefixes.push_back(""); - - for (auto & attrPathPrefixS : attrPathPrefixes) { - auto attrPathPrefix = AttrPath::parse(*evalState, attrPathPrefixS); - auto attrPathS = attrPathPrefixS + std::string(fragment); - auto attrPath = AttrPath::parse(*evalState, attrPathS); - - std::string lastAttr; - if (!attrPath.empty() && !hasSuffix(attrPathS, ".")) { - lastAttr = evalState->symbols[attrPath.back()]; - attrPath.pop_back(); - } +try { + auto hash = prefix.find('#'); + if (hash == std::string::npos) { + completeFlakeRef(completions, evalState->store, prefix); + return; + } - auto attr = root->findAlongAttrPath(attrPath); - if (!attr) - continue; + completions.setType(AddCompletions::Type::Attrs); - for (auto & attr2 : (*attr)->getAttrs()) { - if (hasPrefix(evalState->symbols[attr2], lastAttr)) { - auto attrPath2 = (*attr)->getAttrPath(attr2); - /* Strip the attrpath prefix. */ - attrPath2.erase(attrPath2.begin(), attrPath2.begin() + attrPathPrefix.size()); - // FIXME: handle names with dots - completions.add(flakeRefS + "#" + prefixRoot + attrPath2.to_string(*evalState)); - } - } - } + auto fragment = prefix.substr(hash + 1); + auto flakeRefS = std::string(prefix.substr(0, hash)); - /* And add an empty completion for the default - attrpaths. */ - if (fragment.empty()) { - for (auto & attrPath : defaultFlakeAttrPaths) { - auto attr = root->findAlongAttrPath(AttrPath::parse(*evalState, attrPath)); - if (!attr) - continue; - completions.add(flakeRefS + "#" + prefixRoot); - } - } - } - } catch (Error & e) { - logWarning(e.info()); - } + InstallableFlake{ + nullptr, + evalState, + // TODO: ideally this would use the command base directory instead of assuming ".". + parseFlakeRef(fetchSettings, expandTilde(flakeRefS), std::filesystem::current_path().string()), + fragment, + ExtendedOutputsSpec::Default{}, // FIXME: could be that we're completing the outputs spec... 
+ roles, + lockFlags, + {}} + .getCompletions(flakeRefS, completions); +} catch (Error & e) { + logWarning(e.info()); } void completeFlakeRef(AddCompletions & completions, ref store, std::string_view prefix) { - if (!experimentalFeatureSettings.isEnabled(Xp::Flakes)) - return; - if (prefix == "") completions.add("."); @@ -518,9 +449,9 @@ Installables SourceExprCommand::parseInstallables(ref store, std::vector< std::move(flakeRef), fragment, std::move(extendedOutputsSpec), - getDefaultFlakeAttrPaths(), - getDefaultFlakeAttrPathPrefixes(), - lockFlags)); + getRoles(), + lockFlags, + getDefaultFlakeSchemas())); continue; } catch (...) { ex = std::current_exception(); @@ -562,42 +493,69 @@ static SingleBuiltPath getBuiltPath(ref evalStore, ref store, cons b.raw()); } -std::vector Installable::build( - ref evalStore, ref store, Realise mode, const Installables & installables, BuildMode bMode) +const BuiltPathWithResult & InstallableWithBuildResult::getSuccess() const { - std::vector res; - for (auto & [_, builtPathWithResult] : build2(evalStore, store, mode, installables, bMode)) - res.push_back(builtPathWithResult); - return res; + if (auto * failure = std::get_if(&result)) { + auto failure2 = failure->tryGetFailure(); + assert(failure2); + throw *failure2; + } else + return *std::get_if(&result); } -static void throwBuildErrors(std::vector & buildResults, const Store & store) +void Installable::throwBuildErrors(std::vector & buildResults, const Store & store) { - std::vector> failed; for (auto & buildResult : buildResults) { - if (auto * failure = buildResult.tryGetFailure()) { - failed.push_back({&buildResult, failure}); - } - } + if (std::get_if(&buildResult.result)) { + // Report success first. 
+ for (auto & buildResult : buildResults) { + if (std::get_if(&buildResult.result)) + notice("✅ " ANSI_BOLD "%s" ANSI_NORMAL, buildResult.installable->what()); + } - auto failedResult = failed.begin(); - if (failedResult != failed.end()) { - if (failed.size() == 1) { - throw *failedResult->second; - } else { - StringSet failedPaths; - for (; failedResult != failed.end(); failedResult++) { - if (!failedResult->second->message().empty()) { - logError(failedResult->second->info()); + // Then cancelled builds. + for (auto & buildResult : buildResults) { + if (auto failure = std::get_if(&buildResult.result)) { + if (failure->isCancelled()) + notice( + "❓ " ANSI_BOLD "%s" ANSI_NORMAL ANSI_FAINT " (cancelled)", + buildResult.installable->what()); + } + } + + // Then failures. + for (auto & buildResult : buildResults) { + if (auto failure = std::get_if(&buildResult.result)) { + if (failure->isCancelled()) + continue; + auto failure2 = failure->tryGetFailure(); + assert(failure2); + printError("❌ " ANSI_RED "%s" ANSI_NORMAL, buildResult.installable->what()); + try { + throw *failure2; + } catch (Error & e) { + logError(e.info()); + } } - failedPaths.insert(failedResult->first->path.to_string(store)); } - throw Error("build of %s failed", concatStringsSep(", ", quoteStrings(failedPaths))); + + throw Exit(1); } } } -std::vector, BuiltPathWithResult>> Installable::build2( +std::vector Installable::build( + ref evalStore, ref store, Realise mode, const Installables & installables, BuildMode bMode) +{ + auto results = build2(evalStore, store, mode, installables, bMode); + throwBuildErrors(results, *store); + std::vector res; + for (auto & b : results) + res.push_back(b.getSuccess()); + return res; +} + +std::vector Installable::build2( ref evalStore, ref store, Realise mode, const Installables & installables, BuildMode bMode) { if (mode == Realise::Nothing) @@ -619,7 +577,7 @@ std::vector, BuiltPathWithResult>> Installable::build } } - std::vector, BuiltPathWithResult>> res; + 
std::vector res; switch (mode) { @@ -634,17 +592,21 @@ std::vector, BuiltPathWithResult>> Installable::build [&](const DerivedPath::Built & bfd) { auto outputs = resolveDerivedPath(*store, bfd, &*evalStore); res.push_back( - {aux.installable, - {.path = - BuiltPath::Built{ - .drvPath = - make_ref(getBuiltPath(evalStore, store, *bfd.drvPath)), - .outputs = outputs, - }, - .info = aux.info}}); + {.installable = aux.installable, + .result = InstallableWithBuildResult::Success{ + .path = + BuiltPath::Built{ + .drvPath = make_ref( + getBuiltPath(evalStore, store, *bfd.drvPath)), + .outputs = outputs, + }, + .info = aux.info}}); }, [&](const DerivedPath::Opaque & bo) { - res.push_back({aux.installable, {.path = BuiltPath::Opaque{bo.path}, .info = aux.info}}); + res.push_back( + {.installable = aux.installable, + .result = InstallableWithBuildResult::Success{ + .path = BuiltPath::Opaque{bo.path}, .info = aux.info}}); }, }, path.raw()); @@ -658,9 +620,13 @@ std::vector, BuiltPathWithResult>> Installable::build printMissing(store, pathsToBuild, lvlInfo); auto buildResults = store->buildPathsWithResults(pathsToBuild, bMode, evalStore); - throwBuildErrors(buildResults, *store); for (auto & buildResult : buildResults) { - // If we didn't throw, they must all be sucesses + if (buildResult.tryGetFailure()) { + for (auto & aux : backmap[buildResult.path]) { + res.push_back({.installable = aux.installable, .result = buildResult}); + } + continue; + } auto & success = std::get(buildResult.inner); for (auto & aux : backmap[buildResult.path]) { std::visit( @@ -670,20 +636,22 @@ std::vector, BuiltPathWithResult>> Installable::build for (auto & [outputName, realisation] : success.builtOutputs) outputs.emplace(outputName, realisation.outPath); res.push_back( - {aux.installable, - {.path = - BuiltPath::Built{ - .drvPath = - make_ref(getBuiltPath(evalStore, store, *bfd.drvPath)), - .outputs = outputs, - }, - .info = aux.info, - .result = buildResult}}); + {.installable = aux.installable, 
+ .result = InstallableWithBuildResult::Success{ + .path = + BuiltPath::Built{ + .drvPath = make_ref( + getBuiltPath(evalStore, store, *bfd.drvPath)), + .outputs = outputs, + }, + .info = aux.info, + .result = buildResult}}); }, [&](const DerivedPath::Opaque & bo) { res.push_back( - {aux.installable, - {.path = BuiltPath::Opaque{bo.path}, .info = aux.info, .result = buildResult}}); + {.installable = aux.installable, + .result = InstallableWithBuildResult::Success{ + .path = BuiltPath::Opaque{bo.path}, .info = aux.info, .result = buildResult}}); }, }, buildResult.path.raw()); @@ -844,8 +812,11 @@ InstallableCommand::InstallableCommand() }); } +void InstallableCommand::preRun(ref store) {} + void InstallableCommand::run(ref store) { + preRun(store); auto installable = parseInstallable(store, _installable); run(store, std::move(installable)); } diff --git a/src/libcmd/meson.build b/src/libcmd/meson.build index d970a8e4b066..652279240657 100644 --- a/src/libcmd/meson.build +++ b/src/libcmd/meson.build @@ -71,6 +71,7 @@ config_priv_h = configure_file( ) subdir('nix-meson-build-support/common') +subdir('nix-meson-build-support/generate-header') sources = files( 'built-path.cc', @@ -78,6 +79,7 @@ sources = files( 'command.cc', 'common-eval-args.cc', 'editor-for.cc', + 'flake-schemas.cc', 'get-build-log.cc', 'installable-attr-path.cc', 'installable-derived-path.cc', @@ -97,6 +99,11 @@ if host_machine.system() != 'windows' ) endif +sources += [ + gen_header.process('call-flake-schemas.nix'), + gen_header.process('builtin-flake-schemas.nix'), +] + subdir('include/nix/cmd') subdir('nix-meson-build-support/export-all-symbols') @@ -110,7 +117,7 @@ this_library = library( dependencies : deps_public + deps_private + deps_other, include_directories : include_dirs, link_args : linker_export_flags, - prelink : true, # For C++ static initializers + prelink : prelink, # For C++ static initializers install : true, cpp_pch : do_pch ? 
[ 'pch/precompiled-headers.hh' ] : [], ) diff --git a/src/libcmd/package.nix b/src/libcmd/package.nix index c382f0e5760d..1d677142da1d 100644 --- a/src/libcmd/package.nix +++ b/src/libcmd/package.nix @@ -35,7 +35,7 @@ let in mkMesonLibrary (finalAttrs: { - pname = "nix-cmd"; + pname = "determinate-nix-cmd"; inherit version; workDir = ./.; @@ -49,6 +49,8 @@ mkMesonLibrary (finalAttrs: { ./include/nix/cmd/meson.build (fileset.fileFilter (file: file.hasExt "cc") ./.) (fileset.fileFilter (file: file.hasExt "hh") ./.) + ./call-flake-schemas.nix + ./builtin-flake-schemas.nix ]; buildInputs = [ diff --git a/src/libcmd/repl.cc b/src/libcmd/repl.cc index 52ecc9092328..19e842c2ce6e 100644 --- a/src/libcmd/repl.cc +++ b/src/libcmd/repl.cc @@ -171,7 +171,7 @@ ReplExitStatus NixRepl::mainLoop() if (state->debugRepl) { debuggerNotice = " debugger"; } - notice("Nix %1%%2%\nType :? for help.", nixVersion, debuggerNotice); + notice("Nix %s\nType :? for help.", version(), debuggerNotice); } isFirstRepl = false; @@ -311,6 +311,7 @@ StorePath NixRepl::getDerivationPath(Value & v) auto drvPath = packageInfo->queryDrvPath(); if (!drvPath) throw Error("expression did not evaluate to a valid derivation (no 'drvPath' attribute)"); + state->waitForPath(*drvPath); if (!state->store->isValidPath(*drvPath)) throw Error("expression evaluated to invalid derivation '%s'", state->store->printStorePath(*drvPath)); return *drvPath; diff --git a/src/libexpr-c/meson.build b/src/libexpr-c/meson.build index c47704ce4112..df1e3c05880c 100644 --- a/src/libexpr-c/meson.build +++ b/src/libexpr-c/meson.build @@ -54,7 +54,7 @@ this_library = library( dependencies : deps_public + deps_private + deps_other, include_directories : include_dirs, link_args : linker_export_flags, - prelink : true, # For C++ static initializers + prelink : prelink, # For C++ static initializers install : true, ) diff --git a/src/libexpr-c/nix_api_expr.cc b/src/libexpr-c/nix_api_expr.cc index 97680ac6bfe7..943d90a211e6 100644 --- 
a/src/libexpr-c/nix_api_expr.cc +++ b/src/libexpr-c/nix_api_expr.cc @@ -71,6 +71,7 @@ nix_err nix_expr_eval_from_string( nix::Expr * parsedExpr = state->state.parseExprFromString(expr, state->state.rootPath(nix::CanonPath(path))); state->state.eval(parsedExpr, *value->value); state->state.forceValue(*value->value, nix::noPos); + state->state.waitForAllPaths(); } NIXC_CATCH_ERRS } @@ -82,6 +83,7 @@ nix_err nix_value_call(nix_c_context * context, EvalState * state, Value * fn, n try { state->state.callFunction(*fn->value, *arg->value, *value->value, nix::noPos); state->state.forceValue(*value->value, nix::noPos); + state->state.waitForAllPaths(); } NIXC_CATCH_ERRS } @@ -100,6 +102,7 @@ nix_err nix_value_call_multi( try { state->state.callFunction(*fn->value, {internal_args.data(), nargs}, *value->value, nix::noPos); state->state.forceValue(*value->value, nix::noPos); + state->state.waitForAllPaths(); } NIXC_CATCH_ERRS } @@ -110,6 +113,7 @@ nix_err nix_value_force(nix_c_context * context, EvalState * state, nix_value * context->last_err_code = NIX_OK; try { state->state.forceValue(*value->value, nix::noPos); + state->state.waitForAllPaths(); } NIXC_CATCH_ERRS } @@ -120,6 +124,7 @@ nix_err nix_value_force_deep(nix_c_context * context, EvalState * state, nix_val context->last_err_code = NIX_OK; try { state->state.forceValueDeep(*value->value); + state->state.waitForAllPaths(); } NIXC_CATCH_ERRS } diff --git a/src/libexpr-c/nix_api_value.cc b/src/libexpr-c/nix_api_value.cc index 589ebf9a8ec2..e7a439aef196 100644 --- a/src/libexpr-c/nix_api_value.cc +++ b/src/libexpr-c/nix_api_value.cc @@ -395,6 +395,7 @@ nix_value * nix_get_attr_byname(nix_c_context * context, const nix_value * value auto attr = v.attrs()->get(s); if (attr) { state->state.forceValue(*attr->value, nix::noPos); + state->state.waitForAllPaths(); return new_nix_value(attr->value, state->state.mem); } nix_set_err_msg(context, NIX_ERR_KEY, "missing attribute"); diff --git a/src/libexpr-c/nix_api_value.h 
b/src/libexpr-c/nix_api_value.h index 0220bf68e2df..a01bfb280599 100644 --- a/src/libexpr-c/nix_api_value.h +++ b/src/libexpr-c/nix_api_value.h @@ -101,6 +101,8 @@ typedef enum { * @see Externals */ NIX_TYPE_EXTERNAL, + /** @brief Failed value. Contains an exception that can be rethrown. + */ NIX_TYPE_FAILED, } ValueType; diff --git a/src/libexpr-c/package.nix b/src/libexpr-c/package.nix index 694fbc1fe789..ec92ecce1054 100644 --- a/src/libexpr-c/package.nix +++ b/src/libexpr-c/package.nix @@ -15,7 +15,7 @@ let in mkMesonLibrary (finalAttrs: { - pname = "nix-expr-c"; + pname = "determinate-nix-expr-c"; inherit version; workDir = ./.; diff --git a/src/libexpr-test-support/include/nix/expr/tests/value/context.hh b/src/libexpr-test-support/include/nix/expr/tests/value/context.hh index 68a0b8dea7d7..2311f3941c13 100644 --- a/src/libexpr-test-support/include/nix/expr/tests/value/context.hh +++ b/src/libexpr-test-support/include/nix/expr/tests/value/context.hh @@ -26,6 +26,12 @@ struct Arbitrary static Gen arbitrary(); }; +template<> +struct Arbitrary +{ + static Gen arbitrary(); +}; + template<> struct Arbitrary { diff --git a/src/libexpr-test-support/meson.build b/src/libexpr-test-support/meson.build index df28661b7e78..0fae96b47f1e 100644 --- a/src/libexpr-test-support/meson.build +++ b/src/libexpr-test-support/meson.build @@ -50,7 +50,7 @@ this_library = library( # TODO: Remove `-lrapidcheck` when https://github.com/emil-e/rapidcheck/pull/326 # is available. 
See also ../libutil/build.meson link_args : linker_export_flags + [ '-lrapidcheck' ], - prelink : true, # For C++ static initializers + prelink : prelink, # For C++ static initializers install : true, ) diff --git a/src/libexpr-test-support/package.nix b/src/libexpr-test-support/package.nix index 5cb4adaa8c46..1879a5716082 100644 --- a/src/libexpr-test-support/package.nix +++ b/src/libexpr-test-support/package.nix @@ -18,7 +18,7 @@ let in mkMesonLibrary (finalAttrs: { - pname = "nix-util-test-support"; + pname = "determinate-nix-util-test-support"; inherit version; workDir = ./.; diff --git a/src/libexpr-test-support/tests/value/context.cc b/src/libexpr-test-support/tests/value/context.cc index d6036601a948..8ce84fb51f54 100644 --- a/src/libexpr-test-support/tests/value/context.cc +++ b/src/libexpr-test-support/tests/value/context.cc @@ -16,6 +16,15 @@ Gen Arbitrary::arb }); } +Gen Arbitrary::arbitrary() +{ + return gen::map(gen::arbitrary(), [](StorePath storePath) { + return NixStringContextElem::Path{ + .storePath = storePath, + }; + }); +} + Gen Arbitrary::arbitrary() { return gen::mapcat( @@ -31,6 +40,8 @@ Gen Arbitrary::arbitrary() case 2: return gen::map( gen::arbitrary(), [](NixStringContextElem a) { return a; }); + case 3: + return gen::map(gen::arbitrary(), [](NixStringContextElem a) { return a; }); default: assert(false); } diff --git a/src/libexpr-tests/nix_api_expr.cc b/src/libexpr-tests/nix_api_expr.cc index c3a3f2dd53b1..27c7d69ef07e 100644 --- a/src/libexpr-tests/nix_api_expr.cc +++ b/src/libexpr-tests/nix_api_expr.cc @@ -577,6 +577,7 @@ struct DeploymentResourceState bool vm_created = false; }; +#if 0 static void primop_load_resource_input( void * user_data, nix_c_context * context, EvalState * state, nix_value ** args, nix_value * ret) { @@ -664,5 +665,6 @@ TEST_F(nix_api_expr_test, nix_expr_thunk_re_evaluation_after_deployment) assert_ctx_ok(); ASSERT_STREQ("vm-12345", result.c_str()); } +#endif } // namespace nixC diff --git 
a/src/libexpr-tests/value/print.cc b/src/libexpr-tests/value/print.cc index 654a50b0ae0b..c9cabf3fa421 100644 --- a/src/libexpr-tests/value/print.cc +++ b/src/libexpr-tests/value/print.cc @@ -194,14 +194,14 @@ TEST_F(ValuePrintingTests, vFailed) try { throw Error("nope"); } catch (...) { - v.mkFailed(std::current_exception(), nullptr); + v.mkFailed(); } // Historically, a tried and then ignored value (e.g. through tryEval) was // reverted to the original thunk. - test(v, "«thunk»"); - test(v, ANSI_MAGENTA "«thunk»" ANSI_NORMAL, PrintOptions{.ansiColors = true}); + test(v, "«failed»"); + test(v, ANSI_MAGENTA "«failed»" ANSI_NORMAL, PrintOptions{.ansiColors = true}); } TEST_F(ValuePrintingTests, depthAttrs) diff --git a/src/libexpr-tests/value/value.cc b/src/libexpr-tests/value/value.cc index 285b586c0dfc..e22e70f6e336 100644 --- a/src/libexpr-tests/value/value.cc +++ b/src/libexpr-tests/value/value.cc @@ -13,7 +13,6 @@ TEST_F(ValueTest, unsetValue) { Value unsetValue; ASSERT_EQ(false, unsetValue.isValid()); - ASSERT_EQ(nThunk, unsetValue.type()); } TEST_F(ValueTest, vInt) diff --git a/src/libexpr/attr-path.cc b/src/libexpr/attr-path.cc index c57b90112886..63197e4634fd 100644 --- a/src/libexpr/attr-path.cc +++ b/src/libexpr/attr-path.cc @@ -40,6 +40,14 @@ AttrPath AttrPath::parse(EvalState & state, std::string_view s) return res; } +AttrPath AttrPath::fromStrings(EvalState & state, const std::vector & attrNames) +{ + AttrPath res; + for (auto & attrName : attrNames) + res.push_back(state.symbols.create(attrName)); + return res; +} + std::string AttrPath::to_string(EvalState & state) const { return dropEmptyInitThenConcatStringsSep(".", state.symbols.resolve({*this})); diff --git a/src/libexpr/eval-cache.cc b/src/libexpr/eval-cache.cc index a97cef550e8d..15d1e70eae70 100644 --- a/src/libexpr/eval-cache.cc +++ b/src/libexpr/eval-cache.cc @@ -364,23 +364,33 @@ void AttrCursor::fetchCachedValue() throw CachedEvalError(parent->first, parent->second); } -AttrPath 
AttrCursor::getAttrPath() const +AttrPath AttrCursor::getAttrPathRaw() const { if (parent) { - auto attrPath = parent->first->getAttrPath(); + auto attrPath = parent->first->getAttrPathRaw(); attrPath.push_back(parent->second); return attrPath; } else return {}; } -AttrPath AttrCursor::getAttrPath(Symbol name) const +AttrPath AttrCursor::getAttrPath() const +{ + return root->cleanupAttrPath(getAttrPathRaw()); +} + +AttrPath AttrCursor::getAttrPathRaw(Symbol name) const { - auto attrPath = getAttrPath(); + auto attrPath = getAttrPathRaw(); attrPath.push_back(name); return attrPath; } +AttrPath AttrCursor::getAttrPath(Symbol name) const +{ + return root->cleanupAttrPath(getAttrPathRaw(name)); +} + std::string AttrCursor::getAttrPathStr() const { return getAttrPath().to_string(root->state); @@ -554,16 +564,17 @@ string_t AttrCursor::getStringWithContext() if (auto s = std::get_if(&cachedValue->second)) { bool valid = true; for (auto & c : s->second) { - const StorePath & path = std::visit( + const StorePath * path = std::visit( overloaded{ - [&](const NixStringContextElem::DrvDeep & d) -> const StorePath & { return d.drvPath; }, - [&](const NixStringContextElem::Built & b) -> const StorePath & { - return b.drvPath->getBaseStorePath(); + [&](const NixStringContextElem::DrvDeep & d) -> const StorePath * { return &d.drvPath; }, + [&](const NixStringContextElem::Built & b) -> const StorePath * { + return &b.drvPath->getBaseStorePath(); }, - [&](const NixStringContextElem::Opaque & o) -> const StorePath & { return o.path; }, + [&](const NixStringContextElem::Opaque & o) -> const StorePath * { return &o.path; }, + [&](const NixStringContextElem::Path & p) -> const StorePath * { return nullptr; }, }, c.raw); - if (!root->state.store->isValidPath(path)) { + if (!path || !root->state.store->isValidPath(*path)) { valid = false; break; } @@ -711,6 +722,7 @@ StorePath AttrCursor::forceDerivation() /* The eval cache contains 'drvPath', but the actual path has been 
garbage-collected. So force it to be regenerated. */ aDrvPath->forceValue(); + root->state.waitForPath(drvPath); if (!root->state.store->isValidPath(drvPath)) throw Error( "don't know how to recreate store derivation '%s'!", root->state.store->printStorePath(drvPath)); diff --git a/src/libexpr/eval-gc.cc b/src/libexpr/eval-gc.cc index 0d25f38f64de..c1e974e053b8 100644 --- a/src/libexpr/eval-gc.cc +++ b/src/libexpr/eval-gc.cc @@ -46,6 +46,88 @@ static void * oomHandler(size_t requested) throw std::bad_alloc(); } +static size_t getFreeMem() +{ + /* On Linux, use the `MemAvailable` or `MemFree` fields from + /proc/cpuinfo. */ +# ifdef __linux__ + { + std::unordered_map fields; + for (auto & line : + tokenizeString>(readFile(std::filesystem::path("/proc/meminfo")), "\n")) { + auto colon = line.find(':'); + if (colon == line.npos) + continue; + fields.emplace(line.substr(0, colon), trim(line.substr(colon + 1))); + } + + auto i = fields.find("MemAvailable"); + if (i == fields.end()) + i = fields.find("MemFree"); + if (i != fields.end()) { + auto kb = tokenizeString>(i->second, " "); + if (kb.size() == 2 && kb[1] == "kB") + return string2Int(kb[0]).value_or(0) * 1024; + } + } +# endif + + /* On non-Linux systems, conservatively assume that 25% of memory is free. */ + long pageSize = sysconf(_SC_PAGESIZE); + long pages = sysconf(_SC_PHYS_PAGES); + if (pageSize > 0 && pages > 0) + return (static_cast(pageSize) * static_cast(pages)) / 4; + return 0; +} + +/** + * When a thread goes into a coroutine, we lose its original sp until + * control flow returns to the thread. This causes Boehm GC to crash + * since it will scan memory between the coroutine's sp and the + * original stack base of the thread. Therefore, we detect when the + * current sp is outside of the original thread stack and push the + * entire thread stack instead, as an approximation. + * + * This is not optimal, because it causes the stack below sp to be + * scanned. 
However, we usually we don't have active coroutines during + * evaluation, so this is acceptable. + * + * Note that we don't scan coroutine stacks. It's currently assumed + * that we don't have GC roots in coroutines. + */ +void fixupBoehmStackPointer(void ** sp_ptr, void * _pthread_id) +{ + void *& sp = *sp_ptr; + auto pthread_id = reinterpret_cast(_pthread_id); + size_t osStackSize; + char * osStackHi; + char * osStackLo; + +# ifdef __APPLE__ + osStackSize = pthread_get_stacksize_np(pthread_id); + osStackHi = (char *) pthread_get_stackaddr_np(pthread_id); + osStackLo = osStackHi - osStackSize; +# else + pthread_attr_t pattr; + if (pthread_attr_init(&pattr)) + throw Error("fixupBoehmStackPointer: pthread_attr_init failed"); +# ifdef HAVE_PTHREAD_GETATTR_NP + if (pthread_getattr_np(pthread_id, &pattr)) + throw Error("fixupBoehmStackPointer: pthread_getattr_np failed"); +# else +# error "Need `pthread_attr_get_np`" +# endif + if (pthread_attr_getstack(&pattr, (void **) &osStackLo, &osStackSize)) + throw Error("fixupBoehmStackPointer: pthread_attr_getstack failed"); + if (pthread_attr_destroy(&pattr)) + throw Error("fixupBoehmStackPointer: pthread_attr_destroy failed"); + osStackHi = osStackLo + osStackSize; +# endif + + if (sp >= osStackHi || sp < osStackLo) // sp is outside the os stack + sp = osStackLo; +} + static inline void initGCReal() { /* Initialise the Boehm garbage collector. */ @@ -76,8 +158,11 @@ static inline void initGCReal() GC_set_oom_fn(oomHandler); - /* Set the initial heap size to something fairly big (25% of - physical RAM, up to a maximum of 384 MiB) so that in most cases + GC_set_sp_corrector(&fixupBoehmStackPointer); + assert(GC_get_sp_corrector()); + + /* Set the initial heap size to something fairly big (80% of + free RAM, up to a maximum of 4 GiB) so that in most cases we don't need to garbage collect at all. (Collection has a fairly significant overhead.) 
The heap size can be overridden through libgc's GC_INITIAL_HEAP_SIZE environment variable. We @@ -88,15 +173,10 @@ static inline void initGCReal() if (!getEnv("GC_INITIAL_HEAP_SIZE")) { size_t size = 32 * 1024 * 1024; # if HAVE_SYSCONF && defined(_SC_PAGESIZE) && defined(_SC_PHYS_PAGES) - size_t maxSize = 384 * 1024 * 1024; - long pageSize = sysconf(_SC_PAGESIZE); - long pages = sysconf(_SC_PHYS_PAGES); - if (pageSize != -1) - size = (pageSize * pages) / 4; // 25% of RAM - if (size > maxSize) - size = maxSize; + size_t maxSize = 4ULL * 1024 * 1024 * 1024; + auto free = getFreeMem(); + size = std::max(size, std::min((size_t) (free * 0.5), maxSize)); # endif - debug("setting initial heap size to %1% bytes", size); GC_expand_hp(size); } } diff --git a/src/libexpr/eval-profiler.cc b/src/libexpr/eval-profiler.cc index 12c2f239df6e..9c7448d59076 100644 --- a/src/libexpr/eval-profiler.cc +++ b/src/libexpr/eval-profiler.cc @@ -171,6 +171,7 @@ class SampleStack : public EvalProfiler EvalState & state; std::chrono::nanoseconds sampleInterval; AutoCloseFD profileFd; + // FIXME: this needs to become per-thread to support multi-threaded evaluation. FrameStack stack; std::map callCount; std::chrono::time_point lastStackSample = diff --git a/src/libexpr/eval-settings.cc b/src/libexpr/eval-settings.cc index 5cf0ae04304e..daf8dbc452b3 100644 --- a/src/libexpr/eval-settings.cc +++ b/src/libexpr/eval-settings.cc @@ -113,9 +113,19 @@ bool EvalSettings::isPseudoUrl(std::string_view s) std::string EvalSettings::resolvePseudoUrl(std::string_view url) { - if (hasPrefix(url, "channel:")) - return "https://channels.nixos.org/" + std::string(url.substr(8)) + "/nixexprs.tar.xz"; - else + if (hasPrefix(url, "channel:")) { + auto realUrl = "https://channels.nixos.org/" + std::string(url.substr(8)) + "/nixexprs.tar.xz"; + static bool haveWarned = false; + warnOnce( + haveWarned, + "Channels are deprecated in favor of flakes in Determinate Nix. " + "Instead of '%s', use '%s'. 
" + "See https://zero-to-nix.com for a guide to Nix flakes. " + "For details and to offer feedback on the deprecation process, see: https://github.com/DeterminateSystems/nix-src/issues/34.", + url, + realUrl); + return realUrl; + } else return std::string(url); } diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc index 1ac9236d6609..7d95f0693d4d 100644 --- a/src/libexpr/eval.cc +++ b/src/libexpr/eval.cc @@ -26,6 +26,8 @@ #include "nix/fetchers/tarball.hh" #include "nix/fetchers/input-cache.hh" #include "nix/util/current-process.hh" +#include "nix/store/async-path-writer.hh" +#include "nix/expr/parallel-eval.hh" #include "parser-tab.hh" @@ -46,6 +48,11 @@ #include #include #include +#include + +#ifndef _WIN32 // TODO use portable implementation +# include +#endif #include "nix/util/strings-inline.hh" @@ -197,20 +204,36 @@ PosIdx Value::determinePos(const PosIdx pos) const return attrs()->pos; case tLambda: return lambda().fun->pos; +#if 0 + // FIXME: disabled because reading from an app is racy. 
case tApp: return app().left->determinePos(pos); +#endif default: return pos; } #pragma GCC diagnostic pop } -bool Value::isTrivial() const +template<> +bool ValueStorage::isTrivial() const { - return !isa() - && (!isa() - || (dynamic_cast(thunk().expr) && ((ExprAttrs *) thunk().expr)->dynamicAttrs->empty()) - || dynamic_cast(thunk().expr) || dynamic_cast(thunk().expr)); + auto p1_ = p1; // must acquire before reading p0, since thunks can change + auto p0_ = p0.load(std::memory_order_acquire); + + auto pd = static_cast(p0_ & discriminatorMask); + + if (pd == pdThunk || pd == pdPending || pd == pdAwaited) { + bool isApp = p1_ & discriminatorMask; + if (isApp) + return false; + auto expr = untagPointer(p1_); + return (dynamic_cast(expr) && ((ExprAttrs *) expr)->dynamicAttrs->empty()) + || dynamic_cast(expr) || dynamic_cast(expr); + } + + else + return true; } static Symbol getName(const AttrName & name, EvalState & state, Env & env) @@ -232,6 +255,8 @@ EvalMemory::EvalMemory() assertGCInitialized(); } +thread_local EvalState::EvalContext EvalState::evalContext; + EvalState::EvalState( const LookupPath & lookupPathFromArguments, ref store, @@ -300,9 +325,12 @@ EvalState::EvalState( , debugRepl(nullptr) , debugStop(false) , trylevel(0) + , asyncPathWriter(AsyncPathWriter::make(store)) , srcToStore(make_ref()) , importResolutionCache(make_ref()) , fileEvalCache(make_ref()) + , positionToDocComment(make_ref()) + , lookupPathResolved(make_ref()) , regexCache(makeRegexCache()) #if NIX_USE_BOEHMGC , baseEnvP(std::allocate_shared(traceable_allocator(), &mem.allocEnv(BASE_ENV_SIZE))) @@ -311,6 +339,7 @@ EvalState::EvalState( , baseEnv(mem.allocEnv(BASE_ENV_SIZE)) #endif , staticBaseEnv{std::make_shared(nullptr, nullptr)} + , executor{make_ref(settings)} { corepkgsFS->setPathDisplay(""); internalFS->setPathDisplay("«nix-internal»", ""); @@ -457,7 +486,8 @@ void EvalState::checkURI(const std::string & uri0) Value * EvalState::addConstant(const std::string & name, Value & v, 
Constant info) { Value * v2 = allocValue(); - *v2 = v; + // Do a raw copy since `operator =` barfs on thunks. + memcpy((char *) v2, (char *) &v, sizeof(Value)); addConstant(name, v2, info); return v2; } @@ -473,8 +503,10 @@ void EvalState::addConstant(const std::string & name, Value * v, Constant info) We might know the type of a thunk in advance, so be allowed to just write it down in that case. */ - if (auto gotType = v->type(); gotType != nThunk) - assert(info.type == gotType); + if (v->isFinished()) { + if (auto gotType = v->type(); gotType != nThunk) + assert(info.type == gotType); + } /* Install value the base environment. */ staticBaseEnv->vars.emplace_back(symbols.create(name), baseEnvDispl); @@ -660,7 +692,7 @@ void printStaticEnvBindings(const SymbolTable & st, const StaticEnv & se) // just for the current level of Env, not the whole chain. void printWithBindings(const SymbolTable & st, const Env & env) { - if (!env.values[0]->isThunk()) { + if (env.values[0]->isFinished()) { std::cout << "with: "; std::cout << ANSI_MAGENTA; auto j = env.values[0]->attrs()->begin(); @@ -715,7 +747,7 @@ void mapStaticEnvBindings(const SymbolTable & st, const StaticEnv & se, const En if (env.up && se.up) { mapStaticEnvBindings(st, *se.up, *env.up, vm); - if (se.isWith && !env.values[0]->isThunk()) { + if (se.isWith && env.values[0]->isFinished()) { // add 'with' bindings. for (auto & j : *env.values[0]->attrs()) vm.insert_or_assign(std::string(st[j.name]), j.value); @@ -954,7 +986,14 @@ void EvalState::mkPos(Value & v, PosIdx p) auto origin = positions.originOf(p); if (auto path = std::get_if(&origin)) { auto attrs = buildBindings(3); - attrs.alloc(s.file).mkString(path->path.abs(), mem); + if (path->accessor == rootFS && store->isInStore(path->path.abs())) + // FIXME: only do this for virtual store paths? 
+ attrs.alloc(s.file).mkString( + path->path.abs(), + {NixStringContextElem::Path{.storePath = store->toStorePath(path->path.abs()).first}}, + mem); + else + attrs.alloc(s.file).mkString(path->path.abs(), mem); makePositionThunks(*this, p, attrs.alloc(s.line), attrs.alloc(s.column)); v.mkAttrs(attrs); } else @@ -1002,6 +1041,7 @@ std::string EvalState::mkSingleDerivedPathStringRaw(const SingleDerivedPath & p) auto optStaticOutputPath = std::visit( overloaded{ [&](const SingleDerivedPath::Opaque & o) { + waitForPath(o.path); auto drv = store->readDerivation(o.path); auto i = drv.outputs.find(b.output); if (i == drv.outputs.end()) @@ -1077,10 +1117,9 @@ Value * ExprPath::maybeThunk(EvalState & state, Env & env) * from a thunk, ensuring that every file is parsed/evaluated only * once (via the thunk stored in `EvalState::fileEvalCache`). */ -struct ExprParseFile : Expr, gc +struct ExprParseFile : Expr { - // FIXME: make this a reference (see below). - SourcePath path; + SourcePath & path; bool mustBeTrivial; ExprParseFile(SourcePath & path, bool mustBeTrivial) @@ -1131,18 +1170,14 @@ void EvalState::evalFile(const SourcePath & path, Value & v, bool mustBeTrivial) } Value * vExpr; - // FIXME: put ExprParseFile on the stack instead of the heap once - // https://github.com/NixOS/nix/pull/13930 is merged. That will ensure - // the post-condition that `expr` is unreachable after - // `forceValue()` returns. - auto expr = new ExprParseFile{*resolvedPath, mustBeTrivial}; + ExprParseFile expr{*resolvedPath, mustBeTrivial}; fileEvalCache->try_emplace_and_cvisit( *resolvedPath, nullptr, [&](auto & i) { vExpr = allocValue(); - vExpr->mkThunk(&baseEnv, expr); + vExpr->mkThunk(&baseEnv, &expr); i.second = vExpr; }, [&](auto & i) { vExpr = i.second; }); @@ -1448,7 +1483,7 @@ void ExprSelect::eval(EvalState & state, Env & env, Value & v) state.attrSelects[pos2]++; } - state.forceValue(*vAttrs, (pos2 ? pos2 : this->pos)); + state.forceValue(*vAttrs, pos2 ? 
pos2 : this->pos); } catch (Error & e) { if (pos2) { @@ -1507,6 +1542,8 @@ void ExprLambda::eval(EvalState & state, Env & env, Value & v) v.mkLambda(&env, this); } +thread_local size_t EvalState::callDepth = 0; + void EvalState::callFunction(Value & fun, std::span args, Value & vRes, const PosIdx pos) { auto _level = addCallDepth(pos); @@ -1522,15 +1559,16 @@ void EvalState::callFunction(Value & fun, std::span args, Value & vRes, forceValue(fun, pos); - Value vCur(fun); + Value vCur = fun; auto makeAppChain = [&]() { - vRes = vCur; for (auto arg : args) { auto fun2 = allocValue(); - *fun2 = vRes; - vRes.mkPrimOpApp(fun2, arg); + *fun2 = vCur; + vCur.reset(); + vCur.mkPrimOpApp(fun2, arg); } + vRes = vCur; }; const Attr * functor; @@ -1626,6 +1664,7 @@ void EvalState::callFunction(Value & fun, std::span args, Value & vRes, lambda.name ? concatStrings("'", symbols[lambda.name], "'") : "anonymous lambda") : nullptr; + vCur.reset(); lambda.body->eval(*this, env2, vCur); } catch (Error & e) { if (loggerSettings.showTrace.get()) { @@ -1660,7 +1699,9 @@ void EvalState::callFunction(Value & fun, std::span args, Value & vRes, primOpCalls[fn->name]++; try { - fn->impl(*this, vCur.determinePos(noPos), args.data(), vCur); + auto pos = vCur.determinePos(noPos); + vCur.reset(); + fn->impl(*this, pos, args.data(), vCur); } catch (Error & e) { if (fn->addTrace) addErrorTrace(e, pos, "while calling the '%1%' builtin", fn->name); @@ -1682,6 +1723,7 @@ void EvalState::callFunction(Value & fun, std::span args, Value & vRes, assert(primOp->isPrimOp()); auto arity = primOp->primOp()->arity; auto argsLeft = arity - argsDone; + assert(argsLeft); if (args.size() < argsLeft) { /* We still don't have enough arguments, so extend the tPrimOpApp chain. */ @@ -1710,7 +1752,9 @@ void EvalState::callFunction(Value & fun, std::span args, Value & vRes, // 2. Create a fake env (arg1, arg2, etc.) 
and a fake expr (arg1: arg2: etc: builtins.name arg1 arg2 // etc) // so the debugger allows to inspect the wrong parameters passed to the builtin. - fn->impl(*this, vCur.determinePos(noPos), vArgs, vCur); + auto pos = vCur.determinePos(noPos); + vCur.reset(); + fn->impl(*this, pos, vArgs, vCur); } catch (Error & e) { if (fn->addTrace) addErrorTrace(e, pos, "while calling the '%1%' builtin", fn->name); @@ -1727,6 +1771,7 @@ void EvalState::callFunction(Value & fun, std::span args, Value & vRes, heap-allocate a copy and use that instead. */ Value * args2[] = {allocValue(), args[0]}; *args2[0] = vCur; + vCur.reset(); try { callFunction(*functor->value, args2, vCur, functor->pos); } catch (Error & e) { @@ -1914,8 +1959,12 @@ void ExprOpImpl::eval(EvalState & state, Env & env, Value & v) || state.evalBool(env, e2, pos, "in the right operand of the IMPL (->) operator")); } -void ExprOpUpdate::eval(EvalState & state, Value & v, Value & v1, Value & v2) +void ExprOpUpdate::eval(EvalState & state, Env & env, Value & v) { + Value v1, v2; + state.evalAttrs(env, e1, v1, pos, "in the left operand of the update (//) operator"); + state.evalAttrs(env, e2, v2, pos, "in the right operand of the update (//) operator"); + state.nrOpUpdates++; const Bindings & bindings1 = *v1.attrs(); @@ -1989,42 +2038,6 @@ void ExprOpUpdate::eval(EvalState & state, Value & v, Value & v1, Value & v2) state.nrOpUpdateValuesCopied += v.attrs()->size(); } -void ExprOpUpdate::eval(EvalState & state, Env & env, Value & v) -{ - UpdateQueue q; - evalForUpdate(state, env, q); - - Value vTmp; - vTmp.mkAttrs(&Bindings::emptyBindings); - - for (auto & rhs : std::views::reverse(q)) { - /* Remember that queue is sorted rightmost attrset first. 
*/ - eval(state, /*v=*/vTmp, /*v1=*/vTmp, /*v2=*/rhs); - } - - v = vTmp; -} - -void Expr::evalForUpdate(EvalState & state, Env & env, UpdateQueue & q, std::string_view errorCtx) -{ - Value v; - state.evalAttrs(env, this, v, getPos(), errorCtx); - q.push_back(v); -} - -void ExprOpUpdate::evalForUpdate(EvalState & state, Env & env, UpdateQueue & q) -{ - /* Output rightmost attrset first to the merge queue as the one - with the most priority. */ - e2->evalForUpdate(state, env, q, "in the right operand of the update (//) operator"); - e1->evalForUpdate(state, env, q, "in the left operand of the update (//) operator"); -} - -void ExprOpUpdate::evalForUpdate(EvalState & state, Env & env, UpdateQueue & q, std::string_view errorCtx) -{ - evalForUpdate(state, env, q); -} - void ExprOpConcatLists::eval(EvalState & state, Env & env, Value & v) { Value v1; @@ -2144,7 +2157,7 @@ void ExprConcatStrings::eval(EvalState & state, Env & env, Value & v) } else if (firstType == nFloat) { v.mkFloat(nf); } else if (firstType == nPath) { - if (!context.empty()) + if (hasContext(context)) state.error("a string that refers to a store path cannot be appended to a path") .atPos(pos) .withFrame(env, *this) @@ -2172,67 +2185,6 @@ void ExprPos::eval(EvalState & state, Env & env, Value & v) state.mkPos(v, pos); } -void ExprBlackHole::eval(EvalState & state, [[maybe_unused]] Env & env, Value & v) -{ - throwInfiniteRecursionError(state, v); -} - -[[gnu::noinline]] [[noreturn]] void ExprBlackHole::throwInfiniteRecursionError(EvalState & state, Value & v) -{ - state.error("infinite recursion encountered").atPos(v.determinePos(noPos)).debugThrow(); -} - -// always force this to be separate, otherwise forceValue may inline it and take -// a massive perf hit -[[gnu::noinline]] -void EvalState::handleEvalExceptionForThunk(Env * env, Expr * expr, Value & v, const PosIdx pos) -{ - if (!env) - tryFixupBlackHolePos(v, pos); - - auto e = std::current_exception(); - Value * recovery = nullptr; - try { - 
std::rethrow_exception(e); - } catch (const RecoverableEvalError & e) { - recovery = allocValue(); - } catch (...) { - } - if (recovery) { - recovery->mkThunk(env, expr); - } - v.mkFailed(e, recovery); -} - -[[gnu::noinline]] -void EvalState::handleEvalExceptionForApp(Value & v, const Value & savedApp) -{ - auto e = std::current_exception(); - Value * recovery = nullptr; - try { - std::rethrow_exception(e); - } catch (const RecoverableEvalError & e) { - recovery = allocValue(); - } catch (...) { - } - if (recovery) { - *recovery = savedApp; - } - v.mkFailed(e, recovery); -} - -[[gnu::noinline]] -void EvalState::handleEvalFailed(Value & v, const PosIdx pos) -{ - assert(v.isFailed()); - if (auto recoveryValue = v.failed().recoveryValue) { - v = *recoveryValue; - forceValue(v, pos); - } else { - v.failed().rethrow(); - } -} - void EvalState::tryFixupBlackHolePos(Value & v, PosIdx pos) { if (!v.isBlackhole()) @@ -2263,6 +2215,7 @@ void EvalState::forceValueDeep(Value & v) for (auto & i : *v.attrs()) try { // If the value is a thunk, we're evaling. Otherwise no trace necessary. + // FIXME: race, thunk might be updated by another thread auto dts = state.debugRepl && i.value->isThunk() ? 
makeDebugTraceStacker( state, *i.value->thunk().expr, @@ -2415,13 +2368,17 @@ std::string_view EvalState::forceStringNoCtx(Value & v, const PosIdx pos, std::s { auto s = forceString(v, pos, errorCtx); if (v.context()) { - auto ctxElem = NixStringContextElem::parse((*v.context()->begin())->view()); - error( - "the string '%1%' is not allowed to refer to a store path (such as '%2%')", - v.string_view(), - ctxElem.display(*store)) - .withTrace(pos, errorCtx) - .debugThrow(); + NixStringContext context; + copyContext(v, context); + if (hasContext(context)) { + auto ctxElem = NixStringContextElem::parse((*v.context()->begin())->view()); + error( + "the string '%1%' is not allowed to refer to a store path (such as '%2%')", + v.string_view(), + ctxElem.display(*store)) + .withTrace(pos, errorCtx) + .debugThrow(); + } } return s; } @@ -2478,14 +2435,24 @@ BackedStringView EvalState::coerceToString( } if (v.type() == nPath) { + // FIXME: instead of copying the path to the store, we could + // return a virtual store path that lazily copies the path to + // the store in devirtualize(). if (!canonicalizePath && !copyToStore) { // FIXME: hack to preserve path literals that end in a // slash, as in /foo/${x}. 
return v.pathStrView(); } else if (copyToStore) { - return store->printStorePath(copyPathToStore(context, v.path())); + return store->printStorePath(copyPathToStore(context, v.path(), v.determinePos(pos))); } else { - return std::string{v.path().path.abs()}; + auto path = v.path(); + if (path.accessor == rootFS && store->isInStore(path.path.abs())) { + try { + context.insert(NixStringContextElem::Path{.storePath = store->toStorePath(path.path.abs()).first}); + } catch (Error &) { + } + } + return std::string(path.path.abs()); } } @@ -2557,7 +2524,7 @@ BackedStringView EvalState::coerceToString( .debugThrow(); } -StorePath EvalState::copyPathToStore(NixStringContext & context, const SourcePath & path) +StorePath EvalState::copyPathToStore(NixStringContext & context, const SourcePath & path, PosIdx pos) { if (nix::isDerivation(path.path.abs())) error("file names are not allowed to end in '%1%'", drvExtension).debugThrow(); @@ -2570,7 +2537,7 @@ StorePath EvalState::copyPathToStore(NixStringContext & context, const SourcePat *store, path.resolveSymlinks(SymlinkResolution::Ancestors), settings.readOnlyMode ? 
FetchMode::DryRun : FetchMode::Copy, - path.baseName(), + computeBaseName(path, pos), ContentAddressMethod::Raw::NixArchive, nullptr, repair); @@ -2622,7 +2589,9 @@ EvalState::coerceToStorePath(const PosIdx pos, Value & v, NixStringContext & con auto path = coerceToString(pos, v, context, errorCtx, false, false, true).toOwned(); if (auto storePath = store->maybeParseStorePath(path)) return *storePath; - error("path '%1%' is not in the Nix store", path).withTrace(pos, errorCtx).debugThrow(); + error("cannot coerce '%s' to a store path because it is not a subpath of the Nix store", path) + .withTrace(pos, errorCtx) + .debugThrow(); } std::pair EvalState::coerceToSingleDerivedPathUnchecked( @@ -2646,6 +2615,9 @@ std::pair EvalState::coerceToSingleDerivedP .debugThrow(); }, [&](NixStringContextElem::Built && b) -> SingleDerivedPath { return std::move(b); }, + [&](NixStringContextElem::Path && p) -> SingleDerivedPath { + error("string '%s' has no context", s).withTrace(pos, errorCtx).debugThrow(); + }, }, ((NixStringContextElem &&) *context.begin()).raw); return { @@ -3090,6 +3062,11 @@ void EvalState::printStatistics() topObj["nrOpUpdates"] = nrOpUpdates.load(); topObj["nrOpUpdateValuesCopied"] = nrOpUpdateValuesCopied.load(); topObj["nrThunks"] = nrThunks.load(); + topObj["nrThunksAwaited"] = nrThunksAwaited.load(); + topObj["nrThunksAwaitedSlow"] = nrThunksAwaitedSlow.load(); + topObj["nrSpuriousWakeups"] = nrSpuriousWakeups.load(); + topObj["maxWaiting"] = maxWaiting.load(); + topObj["waitingTime"] = microsecondsWaiting / (double) 1000000; topObj["nrAvoided"] = nrAvoided.load(); topObj["nrLookups"] = nrLookups.load(); topObj["nrPrimOpCalls"] = nrPrimOpCalls.load(); @@ -3141,10 +3118,10 @@ void EvalState::printStatistics() } if (getEnv("NIX_SHOW_SYMBOLS").value_or("0") != "0") { + auto list = json::array(); + symbols.dump([&](std::string_view s) { list.emplace_back(std::string(s)); }); // XXX: overrides earlier assignment - topObj["symbols"] = json::array(); - auto & 
list = topObj["symbols"]; - symbols.dump([&](std::string_view s) { list.emplace_back(s); }); + topObj["symbols"] = std::move(list); } if (outPath == "-") { std::cerr << topObj.dump(2) << std::endl; @@ -3276,16 +3253,15 @@ SourcePath EvalState::findFile(const LookupPath & lookupPath, const std::string_ std::optional EvalState::resolveLookupPathPath(const LookupPath::Path & value0, bool initAccessControl) { auto & value = value0.s; - auto i = lookupPathResolved.find(value); - if (i != lookupPathResolved.end()) - return i->second; + if (auto cached = getConcurrent(*lookupPathResolved, value)) + return *cached; auto finish = [&](std::optional res) { if (res) debug("resolved search path element '%s' to '%s'", value, *res); else debug("failed to resolve search path element '%s'", value); - lookupPathResolved.emplace(value, res); + lookupPathResolved->emplace(std::string(value), res); return res; }; @@ -3345,19 +3321,21 @@ Expr * EvalState::parse( const SourcePath & basePath, const std::shared_ptr & staticEnv) { - DocCommentMap tmpDocComments; // Only used when not origin is not a SourcePath - DocCommentMap * docComments = &tmpDocComments; + auto tmpDocComments = make_ref(); - if (auto sourcePath = std::get_if(&origin)) { - auto [it, _] = positionToDocComment.try_emplace(*sourcePath); - docComments = &it->second; - } - - auto result = - parseExprFromBuf(text, length, origin, basePath, mem.exprs, symbols, settings, positions, *docComments, rootFS); + auto result = parseExprFromBuf( + text, length, origin, basePath, mem.exprs, symbols, settings, positions, *tmpDocComments, rootFS); result->bindVars(*this, staticEnv); + if (auto sourcePath = std::get_if(&origin)) + /* A single file might appear multiple times in PosTable if it's + parsed by scopedImport. If we are the first then emplace into the map, otherwise + copy our positions into the existing map. 
*/ + positionToDocComment->emplace_or_visit(*sourcePath, tmpDocComments, [&tmpDocComments](auto & kv) { + kv.second->insert(tmpDocComments->begin(), tmpDocComments->end()); + }); + return result; } @@ -3368,20 +3346,22 @@ ExprAttrs * EvalState::parseReplBindings( const SourcePath & basePath, const std::shared_ptr & staticEnv) { - DocCommentMap tmpDocComments; - DocCommentMap * docComments = &tmpDocComments; - - if (auto sourcePath = std::get_if(&origin)) { - auto [it, _] = positionToDocComment.try_emplace(*sourcePath); - docComments = &it->second; - } + auto tmpDocComments = make_ref(); auto bindings = parseReplBindingsFromBuf( - text, length, origin, basePath, mem.exprs, symbols, settings, positions, *docComments, rootFS); + text, length, origin, basePath, mem.exprs, symbols, settings, positions, *tmpDocComments, rootFS); assert(bindings); bindings->bindVars(*this, staticEnv); + if (auto sourcePath = std::get_if(&origin)) + /* A single file might appear multiple times in PosTable if it's + parsed by scopedImport. If we are the first then emplace into the map, otherwise + copy our positions into the existing map. 
*/ + positionToDocComment->emplace_or_visit(*sourcePath, tmpDocComments, [&tmpDocComments](auto & kv) { + kv.second->insert(tmpDocComments->begin(), tmpDocComments->end()); + }); + return bindings; } @@ -3392,14 +3372,12 @@ DocComment EvalState::getDocCommentForPos(PosIdx pos) if (!path) return {}; - auto table = positionToDocComment.find(*path); - if (table == positionToDocComment.end()) - return {}; - - auto it = table->second.find(pos); - if (it == table->second.end()) - return {}; - return it->second; + DocComment result; + positionToDocComment->visit(*path, [&](const auto & kv) { + if (auto it = kv.second->find(pos); it != kv.second->end()) + result = it->second; + }); + return result; } std::string ExternalValueBase::coerceToString( @@ -3431,4 +3409,24 @@ void forceNoNullByte(std::string_view s, std::function pos) } } +void EvalState::waitForPath(const StorePath & path) +{ + asyncPathWriter->waitForPath(path); +} + +void EvalState::waitForPath(const SingleDerivedPath & path) +{ + std::visit( + overloaded{ + [&](const DerivedPathOpaque & p) { waitForPath(p.path); }, + [&](const SingleDerivedPathBuilt & p) { waitForPath(*p.drvPath); }, + }, + path.raw()); +} + +void EvalState::waitForAllPaths() +{ + asyncPathWriter->waitForAllPaths(); +} + } // namespace nix diff --git a/src/libexpr/include/nix/expr/attr-path.hh b/src/libexpr/include/nix/expr/attr-path.hh index fd48705b8b7b..25384f5c4c87 100644 --- a/src/libexpr/include/nix/expr/attr-path.hh +++ b/src/libexpr/include/nix/expr/attr-path.hh @@ -25,6 +25,8 @@ struct AttrPath : std::vector static AttrPath parse(EvalState & state, std::string_view s); + static AttrPath fromStrings(EvalState & state, const std::vector & attrNames); + std::string to_string(EvalState & state) const; std::vector resolve(EvalState & state) const; diff --git a/src/libexpr/include/nix/expr/eval-cache.hh b/src/libexpr/include/nix/expr/eval-cache.hh index d2ead2bb4614..4fe7278ef07d 100644 --- a/src/libexpr/include/nix/expr/eval-cache.hh +++ 
b/src/libexpr/include/nix/expr/eval-cache.hh @@ -35,7 +35,13 @@ class EvalCache : public std::enable_shared_from_this friend struct CachedEvalError; std::shared_ptr db; + +public: EvalState & state; + + std::function cleanupAttrPath = [](AttrPath && attrPath) { return std::move(attrPath); }; + +private: typedef fun RootLoader; RootLoader rootLoader; RootValue value; @@ -99,7 +105,10 @@ class AttrCursor : public std::enable_shared_from_this friend class EvalCache; friend struct CachedEvalError; +public: ref root; + +private: using Parent = std::optional, Symbol>>; Parent parent; RootValue _value; @@ -127,8 +136,12 @@ public: AttrPath getAttrPath() const; + AttrPath getAttrPathRaw() const; + AttrPath getAttrPath(Symbol name) const; + AttrPath getAttrPathRaw(Symbol name) const; + std::string getAttrPathStr() const; std::string getAttrPathStr(Symbol name) const; diff --git a/src/libexpr/include/nix/expr/eval-inline.hh b/src/libexpr/include/nix/expr/eval-inline.hh index 9163238d4f6e..7e34029375e8 100644 --- a/src/libexpr/include/nix/expr/eval-inline.hh +++ b/src/libexpr/include/nix/expr/eval-inline.hh @@ -92,34 +92,68 @@ Env & EvalMemory::allocEnv(size_t size) return *env; } -[[gnu::always_inline]] -void EvalState::forceValue(Value & v, const PosIdx pos) +/** + * An identifier of the current thread for deadlock detection, stored + * in p0 of pending/awaited thunks. We're not using std::thread::id + * because it's not guaranteed to fit. + */ +extern thread_local uint32_t myEvalThreadId; + +template +void ValueStorage>>::force( + EvalState & state, PosIdx pos) { - if (v.isThunk()) { - Env * env = v.thunk().env; - assert(env || v.isBlackhole()); - Expr * expr = v.thunk().expr; - try { - v.mkBlackhole(); - if (env) [[likely]] - expr->eval(*this, *env, v); - else - ExprBlackHole::throwInfiniteRecursionError(*this, v); - } catch (...) 
{ - handleEvalExceptionForThunk(env, expr, v, pos); - throw; - } - } else if (v.isApp()) { - Value savedApp = v; + auto p0_ = p0.load(std::memory_order_acquire); + + auto pd = static_cast(p0_ & discriminatorMask); + + if (pd == pdThunk) { try { - callFunction(*v.app().left, *v.app().right, v, pos); + // The value we get here is only valid if we can set the + // thunk to pending. + auto p1_ = p1; + + // Atomically set the thunk to "pending". + if (!p0.compare_exchange_strong( + p0_, + pdPending | (myEvalThreadId << discriminatorBits), + std::memory_order_acquire, + std::memory_order_acquire)) { + pd = static_cast(p0_ & discriminatorMask); + if (pd == pdPending || pd == pdAwaited) { + // The thunk is already "pending" or "awaited", so + // we need to wait for it. + p0_ = waitOnThunk(state, p0_); + goto done; + } + assert(pd != pdThunk); + // Another thread finished this thunk, no need to wait. + goto done; + } + + bool isApp = p1_ & discriminatorMask; + if (isApp) { + auto left = untagPointer(p0_); + auto right = untagPointer(p1_); + state.callFunction(*left, *right, (Value &) *this, pos); + } else { + auto env = untagPointer(p0_); + auto expr = untagPointer(p1_); + expr->eval(state, *env, (Value &) *this); + } } catch (...) 
{ - handleEvalExceptionForApp(v, savedApp); + state.tryFixupBlackHolePos((Value &) *this, pos); + setStorage(new Value::Failed{.ex = std::current_exception()}); throw; } - } else if (v.isFailed()) { - handleEvalFailed(v, pos); } + + else if (pd == pdPending || pd == pdAwaited) + p0_ = waitOnThunk(state, p0_); + +done: + if (InternalType(p0_ & 0xff) == tFailed) + std::rethrow_exception((std::bit_cast(p1))->ex); } [[gnu::always_inline]] diff --git a/src/libexpr/include/nix/expr/eval-settings.hh b/src/libexpr/include/nix/expr/eval-settings.hh index d9dba95370b1..b58f0cd45943 100644 --- a/src/libexpr/include/nix/expr/eval-settings.hh +++ b/src/libexpr/include/nix/expr/eval-settings.hh @@ -122,7 +122,7 @@ struct EvalSettings : Config - `$HOME/.nix-defexpr/channels` - The [user channel link](@docroot@/command-ref/files/default-nix-expression.md#user-channel-link), pointing to the current state of [channels](@docroot@/command-ref/files/channels.md) for the current user. + The user channel link pointing to the current state of channels for the current user. - `nixpkgs=$NIX_STATE_DIR/profiles/per-user/root/channels/nixpkgs` @@ -132,7 +132,7 @@ struct EvalSettings : Config The current state of all channels for the `root` user. - These files are set up by the [Nix installer](@docroot@/installation/installing-binary.md). + These files are set up by the Nix installer. See [`NIX_STATE_DIR`](@docroot@/command-ref/env-common.md#env-NIX_STATE_DIR) for details on the environment variable. > **Note** @@ -173,7 +173,7 @@ struct EvalSettings : Config R"( If set to `true`, the Nix evaluator doesn't allow access to any files outside of - [`builtins.nixPath`](@docroot@/language/builtins.md#builtins-nixPath), + [`builtins.nixPath`](@docroot@/language/builtins.md#builtins-nixPath) or to URIs outside of [`allowed-uris`](@docroot@/command-ref/conf-file.md#conf-allowed-uris). 
)"}; @@ -302,7 +302,7 @@ struct EvalSettings : Config "ignore-try", R"( If set to true, ignore exceptions inside 'tryEval' calls when evaluating Nix expressions in - debug mode (using the --debugger flag). By default the debugger pauses on all exceptions. + debug mode (using the --debugger flag). By default, the debugger pauses on all exceptions. )"}; Setting traceVerbose{ @@ -320,7 +320,7 @@ struct EvalSettings : Config "debugger-on-trace", R"( If set to true and the `--debugger` flag is given, the following functions - enter the debugger like [`builtins.break`](@docroot@/language/builtins.md#builtins-break): + enter the debugger like [`builtins.break`](@docroot@/language/builtins.md#builtins-break). * [`builtins.trace`](@docroot@/language/builtins.md#builtins-trace) * [`builtins.traceVerbose`](@docroot@/language/builtins.md#builtins-traceVerbose) @@ -336,7 +336,7 @@ struct EvalSettings : Config "debugger-on-warn", R"( If set to true and the `--debugger` flag is given, [`builtins.warn`](@docroot@/language/builtins.md#builtins-warn) - will enter the debugger like [`builtins.break`](@docroot@/language/builtins.md#builtins-break). + enter the debugger like [`builtins.break`](@docroot@/language/builtins.md#builtins-break). This is useful for debugging warnings in third-party Nix code. @@ -350,7 +350,7 @@ struct EvalSettings : Config R"( If set to true, [`builtins.warn`](@docroot@/language/builtins.md#builtins-warn) throws an error when logging a warning. - This will give you a stack trace that leads to the location of the warning. + This gives you a stack trace that leads to the location of the warning. This is useful for finding information about warnings in third-party Nix code when you can not start the interactive debugger, such as when Nix is called from a non-interactive script. See [`debugger-on-warn`](#conf-debugger-on-warn). @@ -466,6 +466,44 @@ struct EvalSettings : Config The default value is chosen to balance performance and memory usage. 
On 32 bit systems where memory is scarce, the default is a large value to reduce the amount of allocations. )"}; + + Setting lazyTrees{ + this, + false, + "lazy-trees", + R"( + If set to true, flakes and trees fetched by [`builtins.fetchTree`](@docroot@/language/builtins.md#builtins-fetchTree) are only copied to the Nix store when they're used as a dependency of a derivation. This avoids copying (potentially large) source trees unnecessarily. + )"}; + + // FIXME: this setting should really be in libflake, but it's + // currently needed in mountInput(). + Setting lazyLocks{ + this, + false, + "lazy-locks", + R"( + If enabled, Nix only includes NAR hashes in lock file entries if they're necessary to lock the input (i.e. when there is no other attribute that allows the content to be verified, like a Git revision). + This is not backward compatible with older versions of Nix. + If disabled, lock file entries always contain a NAR hash. + )"}; + + Setting evalCores{ + this, + 1, + "eval-cores", + R"( + The number of threads used to evaluate Nix expressions. This currently affects the following commands: + + * `nix search` + * `nix flake check` + * `nix flake show` + * `nix eval --json` + * Any evaluation that uses `builtins.parallel` + + The value `0` causes Nix to use all available CPU cores in the system. + + Note that enabling the debugger (`--debugger`) disables multi-threaded evaluation. + )"}; }; /** @@ -473,4 +511,10 @@ struct EvalSettings : Config */ std::filesystem::path getNixDefExpr(); +/** + * Stack size for evaluator threads. This used to be 64 MiB, but macOS as deployed on GitHub Actions has a + * hard limit slightly under that, so we round it down a bit. 
+ */ +constexpr size_t evalStackSize = 60 * 1024 * 1024; + } // namespace nix diff --git a/src/libexpr/include/nix/expr/eval.hh b/src/libexpr/include/nix/expr/eval.hh index bea616187b41..fec4c9dd57ec 100644 --- a/src/libexpr/include/nix/expr/eval.hh +++ b/src/libexpr/include/nix/expr/eval.hh @@ -51,6 +51,9 @@ struct SingleDerivedPath; enum RepairFlag : bool; struct MemorySourceAccessor; struct MountedSourceAccessor; +struct AsyncPathWriter; +struct Provenance; +struct Executor; namespace eval_cache { class EvalCache; @@ -225,7 +228,7 @@ struct StaticEvalSymbols line, column, functor, toString, right, wrong, structuredAttrs, json, allowedReferences, allowedRequisites, disallowedReferences, disallowedRequisites, maxSize, maxClosureSize, builder, args, contentAddressed, impure, outputHash, outputHashAlgo, outputHashMode, recurseForDerivations, description, self, epsilon, startSet, - operator_, key, path, prefix, outputSpecified; + operator_, key, path, prefix, outputSpecified, __meta; Expr::AstSymbols exprSymbols; @@ -278,6 +281,7 @@ struct StaticEvalSymbols .path = alloc.create("path"), .prefix = alloc.create("prefix"), .outputSpecified = alloc.create("outputSpecified"), + .__meta = alloc.create("__meta"), .exprSymbols = { .sub = alloc.create("__sub"), .lessThan = alloc.create("__lessThan"), @@ -420,6 +424,8 @@ public: std::list debugTraces; boost::unordered_flat_map> exprEnvs; + ref asyncPathWriter; + const std::shared_ptr getStaticEnv(const Expr & expr) const { auto i = exprEnvs.find(&expr); @@ -484,11 +490,11 @@ private: * Associate source positions of certain AST nodes with their preceding doc comment, if they have one. * Grouped by file. */ - boost::unordered_flat_map positionToDocComment; + const ref>> positionToDocComment; LookupPath lookupPath; - boost::unordered_flat_map, StringViewHash, std::equal_to<>> + const ref, StringViewHash, std::equal_to<>>> lookupPathResolved; /** @@ -576,7 +582,12 @@ public: /** * Mount an input on the Nix store. 
*/ - StorePath mountInput(fetchers::Input & input, const fetchers::Input & originalInput, ref accessor); + StorePath mountInput( + fetchers::Input & input, + const fetchers::Input & originalInput, + ref accessor, + bool requireLockable, + bool forceNarHash = false); /** * Parse a Nix expression from the specified file. @@ -650,25 +661,10 @@ public: * application, call the function and overwrite `v` with the * result. Otherwise, this is a no-op. */ - inline void forceValue(Value & v, const PosIdx pos); - -private: - - /** - * Internal support function for forceValue - * - * This code is factored out so that it's not in the heavily inlined hot path. - */ - void handleEvalExceptionForThunk(Env * env, Expr * expr, Value & v, const PosIdx pos); - - /** - * Internal support function for forceValue - * - * This code is factored out so that it's not in the heavily inlined hot path. - */ - void handleEvalExceptionForApp(Value & v, const Value & savedApp); - - void handleEvalFailed(Value & v, PosIdx pos); + inline void forceValue(Value & v, const PosIdx pos) + { + v.force(*this, pos); + } void tryFixupBlackHolePos(Value & v, PosIdx pos); @@ -728,6 +724,12 @@ public: std::optional tryAttrsToString( const PosIdx pos, Value & v, NixStringContext & context, bool coerceMore = false, bool copyToStore = true); + StorePath devirtualize(const StorePath & path, StringMap * rewrites = nullptr); + + SingleDerivedPath devirtualize(const SingleDerivedPath & path, StringMap * rewrites = nullptr); + + std::string devirtualize(std::string_view s, const NixStringContext & context); + /** * String coercion. * @@ -745,7 +747,19 @@ public: bool copyToStore = true, bool canonicalizePath = true); - StorePath copyPathToStore(NixStringContext & context, const SourcePath & path); + StorePath copyPathToStore(NixStringContext & context, const SourcePath & path, PosIdx pos); + + /** + * Compute the base name for a `SourcePath`. For non-store paths, + * this is just `SourcePath::baseName()`. 
But for store paths, for + * backwards compatibility, it needs to be `-source`, + * i.e. as if the path were copied to the Nix store. This results + * in a "double-copied" store path like + * `/nix/store/--source`. We don't need to + * materialize /nix/store/-source though. Still, this + * requires reading/hashing the path twice. + */ + std::string computeBaseName(const SourcePath & path, PosIdx pos); /** * Path coercion. @@ -895,10 +909,11 @@ private: const std::shared_ptr & staticEnv); /** - * Current Nix call stack depth, used with `max-call-depth` setting to throw stack overflow hopefully before we run - * out of system stack. + * Current Nix call stack depth, used with `max-call-depth` + * setting to throw stack overflow hopefully before we run out of + * system stack. */ - size_t callDepth = 0; + thread_local static size_t callDepth; public: @@ -1051,6 +1066,10 @@ public: DocComment getDocCommentForPos(PosIdx pos); + void waitForPath(const StorePath & path); + void waitForPath(const SingleDerivedPath & path); + void waitForAllPaths(); + private: /** @@ -1076,8 +1095,18 @@ private: Counter nrPrimOpCalls; Counter nrFunctionCalls; +public: + Counter nrThunksAwaited; + Counter nrThunksAwaitedSlow; + Counter microsecondsWaiting; + Counter currentlyWaiting; + Counter maxWaiting; + Counter nrSpuriousWakeups; + +private: bool countCalls; + // FIXME: make thread-safe. typedef boost::unordered_flat_map> PrimOpCalls; PrimOpCalls primOpCalls; @@ -1089,6 +1118,7 @@ private: void incrFunctionCall(ExprLambda * fun); + // FIXME: make thread-safe. typedef boost::unordered_flat_map> AttrSelects; AttrSelects attrSelects; @@ -1106,6 +1136,56 @@ private: friend struct Value; friend class ListBuilder; + +public: + + /** + * Per-thread evaluation context. This context is propagated to worker threads when a value is evaluated + * asynchronously. 
+ */ + struct EvalContext + { + std::shared_ptr provenance; + }; + + thread_local static EvalContext evalContext; + + /** + * Create a work item that propagates the current evaluation context. + */ + template + auto makeWork(T && t) + { + return [this, t{std::move(t)}, evalContext(evalContext)]() { + this->evalContext = evalContext; + t(); + }; + } + + /** + * Add a work item to the given work vector that propagates the current evaluation context. + */ + template + void addWork(WorkItems & work, uint8_t priority, T && t) + { + work.emplace_back(makeWork(std::move(t)), priority); + } + + template + void spawn(FuturesVector & futures, uint8_t priority, T && t) + { + futures.spawn(priority, makeWork(std::move(t))); + } + + /** + * Worker threads manager. + * + * Note: keep this last to ensure that it's destroyed first, so we + * don't have any background work items (e.g. from + * `builtins.parallel`) referring to a partially destroyed + * `EvalState`. + */ + ref executor; }; struct DebugTraceStacker @@ -1142,6 +1222,24 @@ SourcePath resolveExprPath(SourcePath path, bool addDefaultNix = true); */ bool isAllowedURI(std::string_view uri, const Strings & allowedPaths); +struct PushProvenance +{ + EvalState & state; + std::shared_ptr prev; + + PushProvenance(EvalState & state, std::shared_ptr prov) + : state(state) + { + state.evalContext.provenance.swap(prev); + state.evalContext.provenance.swap(prov); + } + + ~PushProvenance() + { + state.evalContext.provenance.swap(prev); + } +}; + } // namespace nix #include "nix/expr/eval-inline.hh" diff --git a/src/libexpr/include/nix/expr/meson.build b/src/libexpr/include/nix/expr/meson.build index 4213476fe73a..3191fd2dc148 100644 --- a/src/libexpr/include/nix/expr/meson.build +++ b/src/libexpr/include/nix/expr/meson.build @@ -25,11 +25,13 @@ headers = [ config_pub_h ] + files( 'get-drvs.hh', 'json-to-value.hh', 'nixexpr.hh', + 'parallel-eval.hh', 'parser-state.hh', 'primops.hh', 'print-ambiguous.hh', 'print-options.hh', 
'print.hh', + 'provenance.hh', 'repl-exit-status.hh', 'search-path.hh', 'static-string-data.hh', diff --git a/src/libexpr/include/nix/expr/nixexpr.hh b/src/libexpr/include/nix/expr/nixexpr.hh index df39ecdde913..9bce1a9b91ad 100644 --- a/src/libexpr/include/nix/expr/nixexpr.hh +++ b/src/libexpr/include/nix/expr/nixexpr.hh @@ -8,7 +8,6 @@ #include #include -#include "nix/expr/gc-small-vector.hh" #include "nix/expr/value.hh" #include "nix/expr/symbol-table.hh" #include "nix/expr/eval-error.hh" @@ -91,8 +90,6 @@ typedef std::vector AttrSelectionPath; std::string showAttrSelectionPath(const SymbolTable & symbols, std::span attrPath); -using UpdateQueue = SmallTemporaryValueVector; - /* Abstract syntax of Nix expressions. */ struct Expr @@ -123,14 +120,6 @@ struct Expr * of thunks allocated. */ virtual Value * maybeThunk(EvalState & state, Env & env); - - /** - * Only called when performing an attrset update: `//` or similar. - * Instead of writing to a Value &, this function writes to an UpdateQueue. - * This allows the expression to perform multiple updates in a delayed manner, gathering up all the updates before - * applying them. - */ - virtual void evalForUpdate(EvalState & state, Env & env, UpdateQueue & q, std::string_view errorCtx); virtual void setName(Symbol name); virtual void setDocComment(DocComment docComment) {}; @@ -738,7 +727,7 @@ struct ExprOpNot : Expr struct name : Expr \ { \ MakeBinOpMembers(name, s) \ - } + }; MakeBinOp(ExprOpEq, "=="); MakeBinOp(ExprOpNEq, "!="); @@ -749,14 +738,7 @@ MakeBinOp(ExprOpConcatLists, "++"); struct ExprOpUpdate : Expr { -private: - /** Special case for merging of two attrsets. 
*/ - void eval(EvalState & state, Value & v, Value & v1, Value & v2); - void evalForUpdate(EvalState & state, Env & env, UpdateQueue & q); - -public: - MakeBinOpMembers(ExprOpUpdate, "//"); - virtual void evalForUpdate(EvalState & state, Env & env, UpdateQueue & q, std::string_view errorCtx) override; + MakeBinOpMembers(ExprOpUpdate, "//") }; struct ExprConcatStrings : Expr @@ -811,23 +793,11 @@ struct ExprPos : Expr COMMON_METHODS }; -/* only used to mark thunks as black holes. */ -struct ExprBlackHole : Expr -{ - void show(const SymbolTable & symbols, std::ostream & str) const override {} - - void eval(EvalState & state, Env & env, Value & v) override; - - void bindVars(EvalState & es, const std::shared_ptr & env) override {} - - [[noreturn]] static void throwInfiniteRecursionError(EvalState & state, Value & v); -}; - -extern ExprBlackHole eBlackHole; - class Exprs { - std::pmr::monotonic_buffer_resource buffer; + // FIXME: use std::pmr::monotonic_buffer_resource when parallel + // eval is disabled? 
+ std::pmr::synchronized_pool_resource buffer; public: std::pmr::polymorphic_allocator alloc{&buffer}; diff --git a/src/libexpr/include/nix/expr/parallel-eval.hh b/src/libexpr/include/nix/expr/parallel-eval.hh new file mode 100644 index 000000000000..27d002e69ca2 --- /dev/null +++ b/src/libexpr/include/nix/expr/parallel-eval.hh @@ -0,0 +1,92 @@ +#pragma once + +#include +#include +#include +#include + +#include + +#include "nix/util/sync.hh" +#include "nix/util/logging.hh" +#include "nix/util/environment-variables.hh" +#include "nix/util/util.hh" +#include "nix/util/signals.hh" + +#if NIX_USE_BOEHMGC +# include +#endif + +namespace nix { + +struct Executor +{ + using work_t = std::function; + + struct Item + { + std::promise promise; + work_t work; + }; + + struct State + { + std::multimap queue; + std::vector threads; + }; + + std::atomic_bool quit{false}; + + const unsigned int evalCores; + + const bool enabled; + + const std::unique_ptr interruptCallback; + + Sync state_; + + std::condition_variable wakeup; + + static unsigned int getEvalCores(const EvalSettings & evalSettings); + + Executor(const EvalSettings & evalSettings); + + ~Executor(); + + void createWorker(State & state); + + void worker(); + + using WorkItems = std::vector>; + + std::vector> spawn(WorkItems && items); + + static thread_local bool amWorkerThread; +}; + +struct FutureVector +{ + Executor & executor; + + struct State + { + std::vector> futures; + }; + + Sync state_; + + ~FutureVector(); + + // FIXME: add a destructor that cancels/waits for all futures. 
+ + void spawn(Executor::WorkItems && work); + + void spawn(uint8_t prioPrefix, Executor::work_t && work) + { + spawn({{std::move(work), prioPrefix}}); + } + + void finishAll(); +}; + +} // namespace nix diff --git a/src/libexpr/include/nix/expr/provenance.hh b/src/libexpr/include/nix/expr/provenance.hh new file mode 100644 index 000000000000..f4cc887a6b2a --- /dev/null +++ b/src/libexpr/include/nix/expr/provenance.hh @@ -0,0 +1,23 @@ +#pragma once + +#include "nix/util/provenance.hh" + +namespace nix { + +/** + * Provenance indicating that this store path was instantiated by the `derivation` builtin function. Its main purpose is + * to record `meta` fields. + */ +struct DerivationProvenance : Provenance +{ + std::shared_ptr next; + ref meta; + + DerivationProvenance(std::shared_ptr next, ref meta) + : next(std::move(next)) + , meta(std::move(meta)) {}; + + nlohmann::json to_json() const override; +}; + +} // namespace nix diff --git a/src/libexpr/include/nix/expr/symbol-table.hh b/src/libexpr/include/nix/expr/symbol-table.hh index f0220376c53f..231510829331 100644 --- a/src/libexpr/include/nix/expr/symbol-table.hh +++ b/src/libexpr/include/nix/expr/symbol-table.hh @@ -2,13 +2,14 @@ ///@file #include + #include "nix/expr/value.hh" -#include "nix/expr/static-string-data.hh" -#include "nix/util/chunked-vector.hh" #include "nix/util/error.hh" +#include "nix/util/sync.hh" +#include "nix/util/alignment.hh" #include -#include +#include namespace nix { @@ -17,17 +18,27 @@ class SymbolValue : protected Value friend class SymbolStr; friend class SymbolTable; - uint32_t idx; - - SymbolValue() = default; - -public: operator std::string_view() const noexcept { return string_view(); } }; +struct ContiguousArena +{ + const char * data; + const size_t maxSize; + + // Put this in a separate cache line to ensure that a thread + // adding a symbol doesn't slow down threads dereferencing symbols + // by invalidating the read-only `data` field. 
+ alignas(64) std::atomic size{0}; + + ContiguousArena(size_t maxSize); + + size_t allocate(size_t bytes); +}; + class StaticSymbolTable; /** @@ -42,6 +53,7 @@ class Symbol friend class StaticSymbolTable; private: + /// The offset of the symbol in `SymbolTable::arena`. uint32_t id; explicit constexpr Symbol(uint32_t id) noexcept @@ -73,6 +85,8 @@ public: constexpr auto operator<=>(const Symbol & other) const noexcept = default; friend class std::hash; + + constexpr static size_t alignment = alignof(SymbolValue); }; /** @@ -84,25 +98,20 @@ class SymbolStr { friend class SymbolTable; - constexpr static size_t chunkSize{8192}; - using SymbolValueStore = ChunkedVector; - const SymbolValue * s; struct Key { using HashType = boost::hash; - SymbolValueStore & store; std::string_view s; std::size_t hash; - std::pmr::memory_resource & resource; + ContiguousArena & arena; - Key(SymbolValueStore & store, std::string_view s, std::pmr::memory_resource & stringMemory) - : store(store) - , s(s) + Key(std::string_view s, ContiguousArena & arena) + : s(s) , hash(HashType{}(s)) - , resource(stringMemory) + , arena(arena) { } }; @@ -113,22 +122,7 @@ public: { } - SymbolStr(const Key & key) - { - auto size = key.s.size(); - if (size >= std::numeric_limits::max()) { - throw Error("Size of symbol exceeds 4GiB and cannot be stored"); - } - // for multi-threaded implementations: lock store and allocator here - const auto & [v, idx] = key.store.add(SymbolValue{}); - if (size == 0) { - v.mkStringNoCopy(""_sds, nullptr); - } else { - v.mkStringNoCopy(StringData::make(key.resource, key.s)); - } - v.idx = idx; - this->s = &v; - } + SymbolStr(const Key & key); bool operator==(std::string_view s2) const noexcept { @@ -157,11 +151,7 @@ public: [[gnu::always_inline]] bool empty() const noexcept { - auto * p = &s->string_data(); - // Save a dereference in the sentinel value case - if (p == &""_sds) - return true; - return p->size() == 0; + return !s->string_data().size(); } [[gnu::always_inline]] 
@@ -176,11 +166,6 @@ public: return s; } - explicit operator Symbol() const noexcept - { - return Symbol{s->idx + 1}; - } - struct Hash { using is_transparent = void; @@ -218,6 +203,11 @@ public: return operator()(b, a); } }; + + constexpr static size_t computeSize(std::string_view s) + { + return alignUp(sizeof(Value) + sizeof(StringData) + s.size() + 1, Symbol::alignment); + } }; class SymbolTable; @@ -237,6 +227,7 @@ class StaticSymbolTable std::array symbols; std::size_t size = 0; + std::size_t nextId = alignof(SymbolValue); public: constexpr StaticSymbolTable() = default; @@ -245,8 +236,9 @@ public: { /* No need to check bounds because out of bounds access is a compilation error. */ - auto sym = Symbol(size + 1); //< +1 because Symbol with id = 0 is reserved + auto sym = Symbol(nextId); symbols[size++] = {str, sym}; + nextId += SymbolStr::computeSize(str); return sym; } @@ -264,61 +256,67 @@ private: * SymbolTable is an append only data structure. * During its lifetime the monotonic buffer holds all strings and nodes, if the symbol set is node based. */ - std::pmr::monotonic_buffer_resource buffer; - SymbolStr::SymbolValueStore store{16}; + ContiguousArena arena; /** - * Transparent lookup of string view for a pointer to a ChunkedVector entry -> return offset into the store. - * ChunkedVector references are never invalidated. + * Transparent lookup of string view for a pointer to a + * SymbolValue in the arena. */ - boost::unordered_flat_set symbols{SymbolStr::chunkSize}; + boost::concurrent_flat_set symbols; public: SymbolTable(const StaticSymbolTable & staticSymtab) + : arena(1 << 30) { + // Reserve symbol ID 0 and ensure alignment of the first allocation. + arena.allocate(Symbol::alignment); + staticSymtab.copyIntoSymbolTable(*this); } /** * Converts a string into a symbol. */ - Symbol create(std::string_view s) - { - // Most symbols are looked up more than once, so we trade off insertion performance - // for lookup performance. 
- // FIXME: make this thread-safe. - return Symbol(*symbols.insert(SymbolStr::Key{store, s, buffer}).first); - } + Symbol create(std::string_view s); std::vector resolve(const std::span & symbols) const { std::vector result; result.reserve(symbols.size()); - for (auto sym : symbols) + for (auto & sym : symbols) result.push_back((*this)[sym]); return result; } SymbolStr operator[](Symbol s) const { - uint32_t idx = s.id - uint32_t(1); - if (idx >= store.size()) - unreachable(); - return store[idx]; + assert(s.id); + // Note: we don't check arena.size here to avoid a dependency + // on other threads creating new symbols. + return SymbolStr(*reinterpret_cast(arena.data + s.id)); } - [[gnu::always_inline]] size_t size() const noexcept { - return store.size(); + return symbols.size(); } - size_t totalSize() const; + size_t totalSize() const + { + return arena.size; + } template void dump(T callback) const { - store.forEach(callback); + std::string_view left{arena.data, arena.size}; + left = left.substr(Symbol::alignment); + while (!left.empty()) { + auto v = reinterpret_cast(left.data()); + callback(v->string_view()); + left = left.substr( + alignUp(sizeof(SymbolValue) + sizeof(StringData) + v->string_view().size() + 1, Symbol::alignment)); + } } }; diff --git a/src/libexpr/include/nix/expr/value.hh b/src/libexpr/include/nix/expr/value.hh index e1b2bc4e25a5..f41f7f89af43 100644 --- a/src/libexpr/include/nix/expr/value.hh +++ b/src/libexpr/include/nix/expr/value.hh @@ -1,6 +1,7 @@ #pragma once ///@file +#include #include #include #include @@ -27,6 +28,19 @@ namespace nix { struct Value; class BindingsBuilder; +static constexpr int discriminatorBits = 3; + +enum PrimaryDiscriminator : int { + pdSingleDWord = 0, + pdThunk = 1, + pdPending = 2, + pdAwaited = 3, + pdPairOfPointers = 4, + pdListN = 5, // FIXME: get rid of this by putting the size in the first word + pdString = 6, + pdPath = 7, // FIXME: get rid of this by ditching the `accessor` field +}; + /** * Internal 
type discriminator, which is more detailed than `ValueType`, as * it specifies the exact representation used (for types that have multiple @@ -36,32 +50,50 @@ class BindingsBuilder; * about how this is mapped into the alignment bits to save significant memory. * This also restricts the number of internal types represented with distinct memory layouts. */ -enum InternalType { - tUninitialized = 0, - /* layout: Single/zero field payload */ - tInt = 1, - tBool, - tNull, - tFloat, - tFailed, - tExternal, - tPrimOp, - tAttrs, - /* layout: Pair of pointers payload */ - tFirstPairOfPointers, - tListSmall = tFirstPairOfPointers, - tPrimOpApp, - tApp, - tThunk, - tLambda, - tLastPairOfPointers = tLambda, - /* layout: Single untaggable field */ - tFirstSingleUntaggable, - tListN = tFirstSingleUntaggable, - tString, - tPath, - tNumberOfInternalTypes, // Must be last -}; +typedef enum { + /* Values that have more type bits in the first word, and the + payload (a single word) in the second word. */ + tUninitialized = PrimaryDiscriminator::pdSingleDWord | (0 << discriminatorBits), + tInt = PrimaryDiscriminator::pdSingleDWord | (1 << discriminatorBits), + tFloat = PrimaryDiscriminator::pdSingleDWord | (2 << discriminatorBits), + tBool = PrimaryDiscriminator::pdSingleDWord | (3 << discriminatorBits), + tNull = PrimaryDiscriminator::pdSingleDWord | (4 << discriminatorBits), + tAttrs = PrimaryDiscriminator::pdSingleDWord | (5 << discriminatorBits), + tPrimOp = PrimaryDiscriminator::pdSingleDWord | (6 << discriminatorBits), + tFailed = PrimaryDiscriminator::pdSingleDWord | (7 << discriminatorBits), + tExternal = PrimaryDiscriminator::pdSingleDWord | (8 << discriminatorBits), + + /* Thunks. */ + tThunk = PrimaryDiscriminator::pdThunk | (0 << discriminatorBits), + tApp = PrimaryDiscriminator::pdThunk | (1 << discriminatorBits), + + tPending = PrimaryDiscriminator::pdPending, + tAwaited = PrimaryDiscriminator::pdAwaited, + + /* Values that consist of two pointers. 
The second word contains + more type bits in its alignment niche. */ + tListSmall = PrimaryDiscriminator::pdPairOfPointers | (0 << discriminatorBits), + tPrimOpApp = PrimaryDiscriminator::pdPairOfPointers | (1 << discriminatorBits), + tLambda = PrimaryDiscriminator::pdPairOfPointers | (2 << discriminatorBits), + + /* Special values. */ + tListN = PrimaryDiscriminator::pdListN, + tString = PrimaryDiscriminator::pdString, + tPath = PrimaryDiscriminator::pdPath, +} InternalType; + +/** + * Return true if `type` denotes a "finished" value, i.e. a weak-head + * normal form. + * + * Note that tPrimOpApp is considered "finished" because it represents + * a primop call with an incomplete number of arguments, and therefore + * cannot be evaluated further. + */ +inline bool isFinished(InternalType t) +{ + return t != tUninitialized && t != tThunk && t != tApp && t != tPending && t != tAwaited; +} /** * This type abstracts over all actual value types in the language, @@ -87,7 +119,6 @@ class Bindings; struct Env; struct Expr; struct ExprLambda; -struct ExprBlackHole; struct PrimOp; class Symbol; class SymbolStr; @@ -292,7 +323,7 @@ namespace detail { /** * Implementation mixin class for defining the public types - * In can be inherited from by the actual ValueStorage implementations + * In can be inherited by the actual ValueStorage implementations * for free due to Empty Base Class Optimization (EBCO). */ struct ValueBase @@ -428,34 +459,9 @@ struct ValueBase Value * const * elems; }; - struct Failed : gc_cleanup + struct Failed : gc { std::exception_ptr ex; - /** - * Optional value for recovering `RecoverableEvalError` - * Must be set iff `ex` is an instance of `RecoverableEvalError`. 
- */ - Value * recoveryValue; - - Failed(std::exception_ptr ex, Value * recoveryValue) - : ex(ex) - , recoveryValue(recoveryValue) - { - } - - [[noreturn]] void rethrow() const - { - try { - std::rethrow_exception(ex); - } catch (BaseError & e) { - /* Rethrow the copy of the exception - not the original one. - Stack tracing mechanisms rely on being able to modify the exceptions - they catch by reference. */ - e.throwClone(); - } catch (...) { - throw; - } - } }; }; @@ -587,12 +593,44 @@ class alignas(16) }; using PackedPointer = typename PackedPointerTypeStruct::type; - using Payload = std::array; - Payload payload = {}; - static constexpr int discriminatorBits = 3; + /** + * For multithreaded evaluation, we have to make sure that thunks/apps + * (the only mutable types of values) are updated in a safe way. A + * value can have the following states (see `force()`): + * + * * "thunk"/"app". When forced, this value transitions to + * "pending". The current thread will evaluate the + * thunk/app. When done, it will override the value with the + * result. If the value is at that point in the "awaited" state, + * the thread will wake up any waiting threads. + * + * * "pending". This means it's currently being evaluated. If + * another thread forces this value, it transitions to "awaited" + * and the thread will wait for the value to be updated (see + * `waitOnThunk()`). + * + * * "awaited". Like pending, only it means that there already are + * one or more threads waiting for this thunk. + * + * To ensure race-free access, the non-atomic word `p1` must + * always be updated before `p0`. Writes to `p0` should use + * *release* semantics (so that `p1` and any referenced values become + * visible to threads that read `p0`), and reads from `p0` should + * use `*acquire* semantics. + * + * Note: at some point, we may want to switch to 128-bit atomics + * so that `p0` and `p1` can be updated together + * atomically. 
However, 128-bit atomics are a bit problematic at + * present on x86_64 (see + * e.g. https://ibraheem.ca/posts/128-bit-atomics/). + */ + std::atomic p0{0}; + PackedPointer p1{0}; + static constexpr PackedPointer discriminatorMask = (PackedPointer(1) << discriminatorBits) - 1; + // FIXME: move/update /** * The value is stored as a pair of 8-byte double words. All pointers are assumed * to be 8-byte aligned. This gives us at most 6 bits of discriminator bits @@ -622,15 +660,6 @@ class alignas(16) * The primary discriminator with value 0 is reserved for uninitialized Values, * which are useful for diagnostics in C bindings. */ - enum PrimaryDiscriminator : int { - pdUninitialized = 0, - pdSingleDWord, //< layout: Single/zero field payload - /* The order of these enumerations must be the same as in InternalType. */ - pdListN, //< layout: Single untaggable field. - pdString, - pdPath, - pdPairOfPointers, //< layout: Pair of pointers payload - }; template requires std::is_pointer_v @@ -641,7 +670,7 @@ class alignas(16) PrimaryDiscriminator getPrimaryDiscriminator() const noexcept { - return static_cast(payload[0] & discriminatorMask); + return static_cast(p0 & discriminatorMask); } static void assertAligned(PackedPointer val) noexcept @@ -649,13 +678,30 @@ class alignas(16) assert((val & discriminatorMask) == 0 && "Pointer is not 8 bytes aligned"); } + void finish(PackedPointer p0_, PackedPointer p1_) + { + // Note: p1 *must* be updated before p0. + p1 = p1_; + p0_ = p0.exchange(p0_, std::memory_order_release); + + auto pd = static_cast(p0_ & discriminatorMask); + if (pd == pdPending) + // Nothing to do; no thread is waiting on this thunk. + ; + else if (pd == pdAwaited) + // Slow path: wake up the threads that are waiting on this + // thunk. 
+ notifyWaiters(); + else if (pd == pdThunk) + unreachable(); + } + template void setSingleDWordPayload(PackedPointer untaggedVal) noexcept { - /* There's plenty of free upper bits in the first dword, which is - used only for the discriminator. */ - payload[0] = static_cast(pdSingleDWord) | (static_cast(type) << discriminatorBits); - payload[1] = untaggedVal; + /* There's plenty of free upper bits in the first byte, which + is used only for the discriminator. */ + finish(static_cast(type), untaggedVal); } template @@ -664,32 +710,42 @@ class alignas(16) static_assert(discriminator >= pdListN && discriminator <= pdPath); auto firstFieldPayload = std::bit_cast(firstPtrField); assertAligned(firstFieldPayload); - payload[0] = static_cast(discriminator) | firstFieldPayload; - payload[1] = std::bit_cast(untaggableField); + finish(static_cast(discriminator) | firstFieldPayload, std::bit_cast(untaggableField)); } template void setPairOfPointersPayload(T * firstPtrField, U * secondPtrField) noexcept { - static_assert(type >= tFirstPairOfPointers && type <= tLastPairOfPointers); - { - auto firstFieldPayload = std::bit_cast(firstPtrField); - assertAligned(firstFieldPayload); - payload[0] = static_cast(pdPairOfPointers) | firstFieldPayload; - } - { - auto secondFieldPayload = std::bit_cast(secondPtrField); - assertAligned(secondFieldPayload); - payload[1] = (type - tFirstPairOfPointers) | secondFieldPayload; - } + static_assert(type >= tListSmall && type <= tLambda); + auto firstFieldPayload = std::bit_cast(firstPtrField); + assertAligned(firstFieldPayload); + auto secondFieldPayload = std::bit_cast(secondPtrField); + assertAligned(secondFieldPayload); + finish( + static_cast(pdPairOfPointers) | firstFieldPayload, + ((type - tListSmall) >> discriminatorBits) | secondFieldPayload); + } + + template + void setThunkPayload(T * firstPtrField, U * secondPtrField) noexcept + { + static_assert(type >= tThunk && type <= tApp); + auto secondFieldPayload = std::bit_cast(secondPtrField); 
+ assertAligned(secondFieldPayload); + p1 = ((type - tThunk) >> discriminatorBits) | secondFieldPayload; + auto firstFieldPayload = std::bit_cast(firstPtrField); + assertAligned(firstFieldPayload); + // Note: awaited values can never become a thunk, so no need + // to check for waiters. + p0.store(static_cast(pdThunk) | firstFieldPayload, std::memory_order_release); } template requires std::is_pointer_v && std::is_pointer_v void getPairOfPointersPayload(T & firstPtrField, U & secondPtrField) const noexcept { - firstPtrField = untagPointer(payload[0]); - secondPtrField = untagPointer(payload[1]); + firstPtrField = untagPointer(p0); + secondPtrField = untagPointer(p1); } protected: @@ -697,42 +753,45 @@ protected: InternalType getInternalType() const noexcept { switch (auto pd = getPrimaryDiscriminator()) { - case pdUninitialized: - /* Discriminator value of zero is used to distinguish uninitialized values. */ - return tUninitialized; case pdSingleDWord: - /* Payloads that only use up a single double word store the InternalType - in the upper bits of the first double word. */ - return InternalType(payload[0] >> discriminatorBits); + /* Payloads that only use up a single double word store + the full InternalType in the first byte. */ + return InternalType(p0 & 0xff); + case pdThunk: + return static_cast(tThunk + ((p1 & discriminatorMask) << discriminatorBits)); + case pdPending: + return tPending; + case pdAwaited: + return tAwaited; + case pdPairOfPointers: + return static_cast(tListSmall + ((p1 & discriminatorMask) << discriminatorBits)); /* The order must match that of the enumerations defined in InternalType. 
*/ case pdListN: case pdString: case pdPath: - return static_cast(tFirstSingleUntaggable + (pd - pdListN)); - case pdPairOfPointers: - return static_cast(tFirstPairOfPointers + (payload[1] & discriminatorMask)); + return static_cast(tListN + (pd - pdListN)); [[unlikely]] default: nixUnreachableWhenHardened(); } } -#define NIX_VALUE_STORAGE_DEF_PAIR_OF_PTRS(TYPE, MEMBER_A, MEMBER_B) \ - \ - void getStorage(TYPE & val) const noexcept \ - { \ - getPairOfPointersPayload(val MEMBER_A, val MEMBER_B); \ - } \ - \ - void setStorage(TYPE val) noexcept \ - { \ - setPairOfPointersPayload>(val MEMBER_A, val MEMBER_B); \ +#define NIX_VALUE_STORAGE_DEF_PAIR_OF_PTRS(TYPE, SET, MEMBER_A, MEMBER_B) \ + \ + void getStorage(TYPE & val) const noexcept \ + { \ + getPairOfPointersPayload(val MEMBER_A, val MEMBER_B); \ + } \ + \ + void setStorage(TYPE val) noexcept \ + { \ + SET>(val MEMBER_A, val MEMBER_B); \ } - NIX_VALUE_STORAGE_DEF_PAIR_OF_PTRS(SmallList, [0], [1]) - NIX_VALUE_STORAGE_DEF_PAIR_OF_PTRS(PrimOpApplicationThunk, .left, .right) - NIX_VALUE_STORAGE_DEF_PAIR_OF_PTRS(FunctionApplicationThunk, .left, .right) - NIX_VALUE_STORAGE_DEF_PAIR_OF_PTRS(ClosureThunk, .env, .expr) - NIX_VALUE_STORAGE_DEF_PAIR_OF_PTRS(Lambda, .env, .fun) + NIX_VALUE_STORAGE_DEF_PAIR_OF_PTRS(SmallList, setPairOfPointersPayload, [0], [1]) + NIX_VALUE_STORAGE_DEF_PAIR_OF_PTRS(PrimOpApplicationThunk, setPairOfPointersPayload, .left, .right) + NIX_VALUE_STORAGE_DEF_PAIR_OF_PTRS(Lambda, setPairOfPointersPayload, .env, .fun) + NIX_VALUE_STORAGE_DEF_PAIR_OF_PTRS(FunctionApplicationThunk, setThunkPayload, .left, .right) + NIX_VALUE_STORAGE_DEF_PAIR_OF_PTRS(ClosureThunk, setThunkPayload, .env, .expr) #undef NIX_VALUE_STORAGE_DEF_PAIR_OF_PTRS @@ -740,57 +799,57 @@ protected: { /* PackedPointerType -> int64_t here is well-formed, since the standard requires this conversion to follow 2's complement rules. This is just a no-op. 
*/ - integer = NixInt(payload[1]); + integer = NixInt(p1); } void getStorage(bool & boolean) const noexcept { - boolean = payload[1]; + boolean = p1; } void getStorage(Null & null) const noexcept {} void getStorage(NixFloat & fpoint) const noexcept { - fpoint = std::bit_cast(payload[1]); + fpoint = std::bit_cast(p1); } void getStorage(ExternalValueBase *& external) const noexcept { - external = std::bit_cast(payload[1]); + external = std::bit_cast(p1); } void getStorage(PrimOp *& primOp) const noexcept { - primOp = std::bit_cast(payload[1]); + primOp = std::bit_cast(p1); } void getStorage(Bindings *& attrs) const noexcept { - attrs = std::bit_cast(payload[1]); + attrs = std::bit_cast(p1); } void getStorage(List & list) const noexcept { - list.elems = untagPointer(payload[0]); - list.size = payload[1]; + list.elems = untagPointer(p0); + list.size = p1; } void getStorage(StringWithContext & string) const noexcept { - string.context = untagPointer(payload[0]); - string.str = std::bit_cast(payload[1]); + string.context = untagPointer(p0); + string.str = std::bit_cast(p1); } void getStorage(Path & path) const noexcept { - path.accessor = untagPointer(payload[0]); - path.path = std::bit_cast(payload[1]); + path.accessor = untagPointer(p0); + path.path = std::bit_cast(p1); } void getStorage(Failed *& failed) const noexcept { - failed = std::bit_cast(payload[1]); + failed = std::bit_cast(p1); } void setStorage(NixInt integer) noexcept @@ -847,8 +906,80 @@ protected: { setSingleDWordPayload(std::bit_cast(failed)); } + + ValueStorage() {} + + ValueStorage(const ValueStorage & v) + { + *this = v; + } + + /** + * Copy a value. This is not allowed to be a thunk to avoid + * accidental work duplication. 
+ */ + ValueStorage & operator=(const ValueStorage & v) + { + auto p0_ = v.p0.load(std::memory_order_acquire); + auto p1_ = v.p1; // must be loaded after p0 + auto pd = static_cast(p0_ & discriminatorMask); + if (pd == pdThunk || pd == pdPending || pd == pdAwaited) + unreachable(); + finish(p0_, p1_); + return *this; + } + +public: + + /** + * Check whether forcing this value requires a trivial amount of + * computation. A value is trivial if it's finished or if it's a + * thunk whose expression is an attrset with no dynamic + * attributes, a lambda or a list. Note that it's up to the caller + * to check whether the members of those attrsets or lists must be + * trivial. + */ + bool isTrivial() const; + + inline void reset() + { + p1 = 0; + p0.store(0, std::memory_order_relaxed); + } + + /// Only used for testing. + inline void mkBlackhole() + { + p0.store(pdPending, std::memory_order_relaxed); + } + + void force(EvalState & state, PosIdx pos); + +private: + + /** + * Given a thunk that was observed to be in the pending or awaited + * state, wait for it to finish. Returns the first word of the + * value. + */ + PackedPointer waitOnThunk(EvalState & state, PackedPointer p0); + + /** + * Wake up any threads that are waiting on this value. + */ + void notifyWaiters(); }; +template<> +void ValueStorage::notifyWaiters(); + +template<> +ValueStorage::PackedPointer +ValueStorage::waitOnThunk(EvalState & state, PackedPointer p0); + +template<> +bool ValueStorage::isTrivial() const; + /** * View into a list of Value * that is itself immutable. 
* @@ -1090,11 +1221,16 @@ public: void print(EvalState & state, std::ostream & str, PrintOptions options = PrintOptions{}); + // FIXME: optimize, only look at first word + inline bool isFinished() const + { + return nix::isFinished(getInternalType()); + } + // Functions needed to distinguish the type // These should be removed eventually, by putting the functionality that's // needed by callers into methods of this type - // type() == nThunk inline bool isThunk() const { return isa(); @@ -1105,7 +1241,11 @@ public: return isa(); } - inline bool isBlackhole() const; + inline bool isBlackhole() const + { + auto t = getInternalType(); + return t == tPending || t == tAwaited; + } // type() == nFunction inline bool isLambda() const @@ -1131,17 +1271,13 @@ public: /** * Returns the normal type of a Value. This only returns nThunk if * the Value hasn't been forceValue'd - * - * @param invalidIsThunk Instead of UB an an invalid (probably - * 0, so uninitialized) internal type, return `nThunk`. */ - template inline ValueType type() const { /* Explicit lookup table. switch() might compile down (and it does at least with GCC 14) to a jump table. Let's help the compiler a bit here. 
*/ static constexpr auto table = [] { - std::array t{}; + std::array t{}; t[tUninitialized] = nThunk; t[tInt] = nInt; t[tBool] = nBool; @@ -1162,15 +1298,7 @@ public: return t; }(); - auto it = getInternalType(); - if (it == tUninitialized || it >= tNumberOfInternalTypes) [[unlikely]] { - if constexpr (invalidIsThunk) - return nThunk; - else - nixUnreachableWhenHardened(); - } - - return table[it]; + return table[getInternalType()]; } /** @@ -1261,8 +1389,6 @@ public: setStorage(Lambda{.env = e, .fun = f}); } - inline void mkBlackhole(); - void mkPrimOp(PrimOp * p); inline void mkPrimOpApp(Value * l, Value * r) noexcept @@ -1285,9 +1411,9 @@ public: setStorage(n); } - inline void mkFailed(std::exception_ptr e, Value * recovery) noexcept + inline void mkFailed() noexcept { - setStorage(new Value::Failed(e, recovery)); + setStorage(new Value::Failed{.ex = std::current_exception()}); } bool isList() const noexcept @@ -1307,13 +1433,6 @@ public: PosIdx determinePos(const PosIdx pos) const; - /** - * Check whether forcing this value requires a trivial amount of - * computation. In particular, function applications are - * non-trivial. - */ - bool isTrivial() const; - SourcePath path() const { return SourcePath( @@ -1375,6 +1494,7 @@ public: return getStorage(); } + // FIXME: remove this since reading it is racy. ClosureThunk thunk() const noexcept { return getStorage(); @@ -1385,6 +1505,7 @@ public: return getStorage(); } + // FIXME: remove this since reading it is racy. 
FunctionApplicationThunk app() const noexcept { return getStorage(); @@ -1405,26 +1526,12 @@ public: return getStorage().accessor; } - Failed & failed() const noexcept + Failed * failed() const noexcept { - auto p = getStorage(); - assert(p); - return *p; + return getStorage(); } }; -extern ExprBlackHole eBlackHole; - -bool Value::isBlackhole() const -{ - return isThunk() && thunk().expr == (Expr *) &eBlackHole; -} - -void Value::mkBlackhole() -{ - mkThunk(nullptr, (Expr *) &eBlackHole); -} - typedef std::vector> ValueVector; typedef boost::unordered_flat_map< Symbol, diff --git a/src/libexpr/include/nix/expr/value/context.hh b/src/libexpr/include/nix/expr/value/context.hh index 31f03addf2a0..4973ce0a9d94 100644 --- a/src/libexpr/include/nix/expr/value/context.hh +++ b/src/libexpr/include/nix/expr/value/context.hh @@ -64,7 +64,31 @@ struct NixStringContextElem */ using Built = SingleDerivedPath::Built; - using Raw = std::variant; + /** + * A store path that will not result in a store reference when + * used in a derivation or toFile. + * + * When you apply `builtins.toString` to a path value representing + * a path in the Nix store (as is the case with flake inputs), + * historically you got a string without context + * (e.g. `/nix/store/...-source`). This is broken, since it allows + * you to pass a store path to a derivation/toFile without a + * proper store reference. This is especially a problem with lazy + * trees, since the store path is a virtual path that doesn't + * exist. + * + * For backwards compatibility, and to warn users about this + * unsafe use of `toString`, we keep track of such strings as a + * special type of context. + */ + struct Path + { + StorePath storePath; + + GENERATE_CMP(Path, me->storePath); + }; + + using Raw = std::variant; Raw raw; @@ -100,4 +124,10 @@ struct NixStringContextElem */ typedef std::set NixStringContext; +/** + * Returns false if `context` has no elements other than + * `NixStringContextElem::Path`. 
+ */ +bool hasContext(const NixStringContext & context); + } // namespace nix diff --git a/src/libexpr/meson.build b/src/libexpr/meson.build index 0b594dc0cf1f..b99cbb6d6cc2 100644 --- a/src/libexpr/meson.build +++ b/src/libexpr/meson.build @@ -7,6 +7,8 @@ project( # TODO(Qyriad): increase the warning level 'warning_level=1', 'errorlogs=true', # Please print logs for tests that fail + 'unity=on', + 'unity_size=1024', ], meson_version : '>= 1.1', license : 'LGPL-2.1-or-later', @@ -43,6 +45,7 @@ boost = dependency( modules : [ 'container', 'context', + 'thread', ], include_type : 'system', ) @@ -62,7 +65,6 @@ bdw_gc = dependency('bdw-gc', required : bdw_gc_required) if bdw_gc.found() deps_public += bdw_gc foreach funcspec : [ - 'pthread_attr_get_np', 'pthread_getattr_np', ] define_name = 'HAVE_' + funcspec.underscorify().to_upper() @@ -77,6 +79,17 @@ endif # Used in public header. Affects ABI! configdata_pub.set('NIX_USE_BOEHMGC', bdw_gc.found().to_int()) +link_args = [] + +wasmtime_required = get_option('wasm').disable_if( + get_option('default_library') == 'static', + error_message : 'Building with wasmtime and static linking is not supported', +) + +if wasmtime_required.enabled() + link_args += '-lwasmtime' +endif + toml11 = dependency( 'toml11', version : '>=3.7.0', @@ -164,13 +177,15 @@ sources = files( 'function-trace.cc', 'get-drvs.cc', 'json-to-value.cc', - 'lexer-helpers.cc', 'nixexpr.cc', + 'parallel-eval.cc', 'paths.cc', 'primops.cc', 'print-ambiguous.cc', 'print.cc', + 'provenance.cc', 'search-path.cc', + 'symbol-table.cc', 'value-to-json.cc', 'value-to-xml.cc', 'value.cc', @@ -215,6 +230,7 @@ parser_library = static_library( 'nixexpr-parser', parser_tab, lexer_tab, + files('lexer-helpers.cc'), cpp_args : parser_library_cpp_args, dependencies : deps_public + deps_private + deps_other, include_directories : include_dirs, @@ -225,6 +241,7 @@ parser_library = static_library( override_options : [ 'b_ndebug=@0@'.format(not get_option('debug')), 
'b_lto=@0@'.format(get_option('b_lto') and cxx.get_id() != 'gcc'), + 'unity=off', ], ) @@ -238,9 +255,9 @@ this_library = library( soversion : nix_soversion, dependencies : deps_public + deps_private + deps_other, include_directories : include_dirs, - link_args : linker_export_flags, + link_args : linker_export_flags + link_args, link_whole : [ parser_library ], - prelink : true, # For C++ static initializers + prelink : prelink, # For C++ static initializers install : true, cpp_pch : do_pch ? [ 'pch/precompiled-headers.hh' ] : [], ) diff --git a/src/libexpr/meson.options b/src/libexpr/meson.options index 847bb211d302..2defbf77aaa0 100644 --- a/src/libexpr/meson.options +++ b/src/libexpr/meson.options @@ -3,3 +3,9 @@ option( type : 'feature', description : 'enable garbage collection in the Nix expression evaluator (requires Boehm GC)', ) + +option( + 'wasm', + type : 'feature', + description : 'enable wasmtime integration into the Nix expression evaluator', +) diff --git a/src/libexpr/nixexpr.cc b/src/libexpr/nixexpr.cc index 4a2f71a11b8f..b52370816f5a 100644 --- a/src/libexpr/nixexpr.cc +++ b/src/libexpr/nixexpr.cc @@ -13,8 +13,6 @@ namespace nix { Counter Expr::nrExprs; -ExprBlackHole eBlackHole; - // FIXME: remove, because *symbols* are abstract and do not have a single // textual representation; see printIdentifier() std::ostream & operator<<(std::ostream & str, const SymbolStr & symbol) @@ -626,15 +624,6 @@ void ExprLambda::setDocComment(DocComment docComment) // belongs in the same conditional. body->setDocComment(docComment); } -}; - -/* Symbol table. 
*/ - -size_t SymbolTable::totalSize() const -{ - size_t n = 0; - dump([&](SymbolStr s) { n += s.size(); }); - return n; } std::string DocComment::getInnerText(const PosTable & positions) const diff --git a/src/libexpr/package.nix b/src/libexpr/package.nix index d0aef34e95de..6eccc0645132 100644 --- a/src/libexpr/package.nix +++ b/src/libexpr/package.nix @@ -14,6 +14,7 @@ boehmgc, nlohmann_json, toml11, + wasmtime, # Configuration Options @@ -29,6 +30,11 @@ # Temporarily disabled on Windows because the `GC_throw_bad_alloc` # symbol is missing during linking. enableGC ? !stdenv.hostPlatform.isWindows, + + # Whether to use wasmtime for wasm integration in the Nix language evaluator + # + # Temporarily disabled when static linking due to Rust not compiling + enableWasm ? !stdenv.hostPlatform.isStatic, }: let @@ -36,7 +42,7 @@ let in mkMesonLibrary (finalAttrs: { - pname = "nix-expr"; + pname = "determinate-nix-expr"; inherit version; workDir = ./.; @@ -64,7 +70,8 @@ mkMesonLibrary (finalAttrs: { buildInputs = [ toml11 - ]; + ] + ++ lib.optional enableWasm wasmtime; propagatedBuildInputs = [ nix-util @@ -77,8 +84,20 @@ mkMesonLibrary (finalAttrs: { mesonFlags = [ (lib.mesonEnable "gc" enableGC) + (lib.mesonEnable "wasm" enableWasm) ]; + # Fixes a problem with the "nix-expr-libcxxStdenv-static" package output. + # For some reason that is not clear, it is wanting to use libgcc_eh which is not available. + # Force this to be built with compiler-rt over libgcc_eh works. 
+ # Issue: https://github.com/NixOS/nixpkgs/issues/177129 + NIX_CFLAGS_COMPILE = lib.optionalString ( + stdenv.cc.isClang + && stdenv.hostPlatform.isStatic + && stdenv.cc.libcxx != null + && stdenv.cc.libcxx.isLLVM + ) "-rtlib=compiler-rt"; + meta = { platforms = lib.platforms.unix ++ lib.platforms.windows; }; diff --git a/src/libexpr/parallel-eval.cc b/src/libexpr/parallel-eval.cc new file mode 100644 index 000000000000..afc9377b1f18 --- /dev/null +++ b/src/libexpr/parallel-eval.cc @@ -0,0 +1,312 @@ +#include "nix/expr/eval.hh" +#include "nix/expr/parallel-eval.hh" +#include "nix/store/globals.hh" +#include "nix/expr/primops.hh" + +namespace nix { + +// cache line alignment to prevent false sharing +struct alignas(64) WaiterDomain +{ + std::condition_variable cv; +}; + +static std::array, 128> waiterDomains; + +thread_local bool Executor::amWorkerThread{false}; + +unsigned int Executor::getEvalCores(const EvalSettings & evalSettings) +{ + /* Note: the default number of cores is currently limited to 32 + due to scalability bottlenecks. */ + return evalSettings.evalProfilerMode != EvalProfilerMode::disabled ? 1 + : evalSettings.evalCores == 0UL ? std::min(32U, Settings::getDefaultCores()) + : evalSettings.evalCores; +} + +Executor::Executor(const EvalSettings & evalSettings) + : evalCores(getEvalCores(evalSettings)) + , enabled(evalCores > 1) + , interruptCallback(createInterruptCallback([&]() { + for (auto & domain : waiterDomains) + domain.lock()->cv.notify_all(); + })) +{ + debug("executor using %d threads", evalCores); + auto state(state_.lock()); + // FIXME: create worker threads on demand? 
+ for (size_t n = 0; n < evalCores; ++n) + try { + createWorker(*state); + } catch (boost::thread_resource_error & e) { + if (n == 0) + throw Error("could not create any evaluator worker threads: %s", e.what()); + warn("could only create %d evaluator worker threads: %s", n, e.what()); + break; + } +} + +Executor::~Executor() +{ + std::vector threads; + { + auto state(state_.lock()); + quit = true; + std::swap(threads, state->threads); + debug("executor shutting down with %d items left", state->queue.size()); + } + + wakeup.notify_all(); + + for (auto & thr : threads) + thr.join(); +} + +void Executor::createWorker(State & state) +{ + boost::thread::attributes attrs; + attrs.set_stack_size(evalStackSize); + state.threads.push_back(boost::thread(attrs, [&]() { +#if NIX_USE_BOEHMGC + GC_stack_base sb; + GC_get_stack_base(&sb); + GC_register_my_thread(&sb); +#endif + worker(); +#if NIX_USE_BOEHMGC + GC_unregister_my_thread(); +#endif + })); +} + +void Executor::worker() +{ + ReceiveInterrupts receiveInterrupts; + + unix::interruptCheck = [&]() { return (bool) quit; }; + + amWorkerThread = true; + + while (true) { + Item item; + + while (true) { + auto state(state_.lock()); + if (quit) { + // Set an `Interrupted` exception on all promises so + // we get a nicer error than "std::future_error: + // Broken promise". + auto ex = std::make_exception_ptr(Interrupted("interrupted by the user")); + for (auto & item : state->queue) + item.second.promise.set_exception(ex); + state->queue.clear(); + return; + } + if (!state->queue.empty()) { + item = std::move(state->queue.begin()->second); + state->queue.erase(state->queue.begin()); + break; + } + state.wait(wakeup); + } + + try { + item.work(); + item.promise.set_value(); + } catch (const Interrupted &) { + quit = true; + item.promise.set_exception(std::current_exception()); + } catch (...) 
{ + item.promise.set_exception(std::current_exception()); + } + } +} + +std::vector> Executor::spawn(WorkItems && items) +{ + if (items.empty()) + return {}; + + std::vector> futures; + + { + auto state(state_.lock()); + for (auto & item : items) { + std::promise promise; + futures.push_back(promise.get_future()); + thread_local std::random_device rd; + thread_local std::uniform_int_distribution dist(0, 1ULL << 48); + auto key = (uint64_t(item.second) << 48) | dist(rd); + state->queue.emplace(key, Item{.promise = std::move(promise), .work = std::move(item.first)}); + } + } + + if (items.size() == 1) + wakeup.notify_one(); + else + wakeup.notify_all(); + + return futures; +} + +FutureVector::~FutureVector() +{ + try { + finishAll(); + } catch (...) { + ignoreExceptionInDestructor(); + } +} + +void FutureVector::spawn(Executor::WorkItems && work) +{ + auto futures = executor.spawn(std::move(work)); + auto state(state_.lock()); + for (auto & future : futures) + state->futures.push_back(std::move(future)); +} + +void FutureVector::finishAll() +{ + std::exception_ptr ex; + while (true) { + std::vector> futures; + { + auto state(state_.lock()); + std::swap(futures, state->futures); + } + debug("got %d futures", futures.size()); + if (futures.empty()) + break; + for (auto & future : futures) + try { + future.get(); + } catch (...) 
{ + if (ex) { + if (!getInterrupted()) + logExceptionExceptInterrupt(); + } else + ex = std::current_exception(); + } + } + if (ex) + std::rethrow_exception(ex); +} + +static Sync & getWaiterDomain(detail::ValueBase & v) +{ + auto domain = (((size_t) &v) >> 5) % waiterDomains.size(); + return waiterDomains[domain]; +} + +static std::atomic nextEvalThreadId{1}; +thread_local uint32_t myEvalThreadId(nextEvalThreadId++); + +template<> +ValueStorage::PackedPointer +ValueStorage::waitOnThunk(EvalState & state, PackedPointer expectedP0) +{ + state.nrThunksAwaited++; + + auto domain = getWaiterDomain(*this).lock(); + + auto threadId = expectedP0 >> discriminatorBits; + + if (static_cast(expectedP0 & discriminatorMask) == pdAwaited) { + /* Make sure that the value is still awaited, now that we're + holding the domain lock. */ + auto p0_ = p0.load(std::memory_order_acquire); + auto pd = static_cast(p0_ & discriminatorMask); + + /* If the value has been finalized in the meantime (i.e. is no + longer pending), we're done. */ + if (pd != pdAwaited) { + assert(pd != pdThunk && pd != pdPending); + return p0_; + } + } else { + /* Mark this value as being waited on. */ + PackedPointer p0_ = expectedP0; + if (!p0.compare_exchange_strong( + p0_, + pdAwaited | (threadId << discriminatorBits), + std::memory_order_acquire, + std::memory_order_acquire)) { + /* If the value has been finalized in the meantime (i.e. is + no longer pending), we're done. */ + auto pd = static_cast(p0_ & discriminatorMask); + if (pd != pdAwaited) { + assert(pd != pdThunk && pd != pdPending); + return p0_; + } + /* The value was already in the "waited on" state, so we're + not the only thread waiting on it. */ + } + } + + /* Wait for another thread to finish this value. 
*/ + if (threadId == myEvalThreadId) + state.error("infinite recursion encountered") + .atPos(((Value &) *this).determinePos(noPos)) + .debugThrow(); + + state.nrThunksAwaitedSlow++; + state.currentlyWaiting++; + state.maxWaiting = std::max(state.maxWaiting, state.currentlyWaiting); + + auto now1 = std::chrono::steady_clock::now(); + + while (true) { + domain.wait(domain->cv); + auto p0_ = p0.load(std::memory_order_acquire); + auto pd = static_cast(p0_ & discriminatorMask); + if (pd != pdAwaited) { + assert(pd != pdThunk && pd != pdPending); + auto now2 = std::chrono::steady_clock::now(); + state.microsecondsWaiting += std::chrono::duration_cast(now2 - now1).count(); + state.currentlyWaiting--; + return p0_; + } + state.nrSpuriousWakeups++; + checkInterrupt(); + } +} + +template<> +void ValueStorage::notifyWaiters() +{ + auto domain = getWaiterDomain(*this).lock(); + + domain->cv.notify_all(); +} + +static void prim_parallel(EvalState & state, const PosIdx pos, Value ** args, Value & v) +{ + state.forceList(*args[0], pos, "while evaluating the first argument passed to builtins.parallel"); + + if (state.executor->enabled) { + Executor::WorkItems work; + for (auto value : args[0]->listView()) + if (!value->isFinished()) + state.addWork( + work, 0, [value(allocRootValue(value)), &state, pos]() { state.forceValue(**value, pos); }); + state.executor->spawn(std::move(work)); + } + + state.forceValue(*args[1], pos); + v = *args[1]; +} + +// FIXME: gate this behind an experimental feature. +static RegisterPrimOp r_parallel({ + .name = "__parallel", + .args = {"xs", "x"}, + .arity = 2, + .doc = R"( + Start evaluation of the values `xs` in the background and return `x`. 
+ )", + .impl = prim_parallel, + .experimentalFeature = Xp::ParallelEval, +}); + +} // namespace nix diff --git a/src/libexpr/paths.cc b/src/libexpr/paths.cc index 08c2a45abb0f..00165b44cc4e 100644 --- a/src/libexpr/paths.cc +++ b/src/libexpr/paths.cc @@ -20,24 +20,97 @@ SourcePath EvalState::storePath(const StorePath & path) return {rootFS, CanonPath{store->printStorePath(path)}}; } -StorePath -EvalState::mountInput(fetchers::Input & input, const fetchers::Input & originalInput, ref accessor) +StorePath EvalState::devirtualize(const StorePath & path, StringMap * rewrites) { - auto [storePath, narHash] = fetchToStore2(fetchSettings, *store, accessor, FetchMode::Copy, input.getName()); + if (auto mount = storeFS->getMount(CanonPath(store->printStorePath(path)))) { + auto storePath = fetchToStore( + fetchSettings, + *store, + SourcePath{ref(mount)}, + settings.readOnlyMode ? FetchMode::DryRun : FetchMode::Copy, + path.name()); + assert(storePath.name() == path.name()); + if (rewrites) + rewrites->emplace(path.hashPart(), storePath.hashPart()); + return storePath; + } else + return path; +} + +SingleDerivedPath EvalState::devirtualize(const SingleDerivedPath & path, StringMap * rewrites) +{ + if (auto o = std::get_if(&path.raw())) + return SingleDerivedPath::Opaque{devirtualize(o->path, rewrites)}; + else + return path; +} + +std::string EvalState::devirtualize(std::string_view s, const NixStringContext & context) +{ + StringMap rewrites; + + for (auto & c : context) + if (auto o = std::get_if(&c.raw)) + devirtualize(o->path, &rewrites); + + return rewriteStrings(std::string(s), rewrites); +} + +std::string EvalState::computeBaseName(const SourcePath & path, PosIdx pos) +{ + if (path.accessor == rootFS) { + if (auto storePath = store->maybeParseStorePath(path.path.abs())) { + debug( + "Copying '%s' to the store again.\n" + "You can make Nix evaluate faster and copy fewer files by replacing `./.` with the `self` flake input, " + "or `builtins.path { path = ./.; name = 
\"source\"; }`.\n", + path); + return std::string( + fetchToStore(fetchSettings, *store, path, FetchMode::DryRun, storePath->name()).to_string()); + } + } + return std::string(path.baseName()); +} + +StorePath EvalState::mountInput( + fetchers::Input & input, + const fetchers::Input & originalInput, + ref accessor, + bool requireLockable, + bool forceNarHash) +{ + auto storePath = settings.lazyTrees + ? StorePath::random(input.getName()) + : fetchToStore(fetchSettings, *store, accessor, FetchMode::Copy, input.getName()); allowPath(storePath); // FIXME: should just whitelist the entire virtual store + std::optional _narHash; + + auto getNarHash = [&]() { + if (!_narHash) { + if (store->isValidPath(storePath)) + _narHash = store->queryPathInfo(storePath)->narHash; + else + _narHash = fetchToStore2(fetchSettings, *store, accessor, FetchMode::DryRun, input.getName()).second; + } + return _narHash; + }; + storeFS->mount(CanonPath(store->printStorePath(storePath)), accessor); - input.attrs.insert_or_assign("narHash", narHash.to_string(HashFormat::SRI, true)); + if (forceNarHash + || (requireLockable && (!settings.lazyTrees || !settings.lazyLocks || !input.isLocked(fetchSettings)) + && !input.getNarHash())) + input.attrs.insert_or_assign("narHash", getNarHash()->to_string(HashFormat::SRI, true)); - if (originalInput.getNarHash() && narHash != *originalInput.getNarHash()) + if (originalInput.getNarHash() && *getNarHash() != *originalInput.getNarHash()) throw Error( (unsigned int) 102, "NAR hash mismatch in input '%s', expected '%s' but got '%s'", originalInput.to_string(), - narHash.to_string(HashFormat::SRI, true), - originalInput.getNarHash()->to_string(HashFormat::SRI, true)); + originalInput.getNarHash()->to_string(HashFormat::SRI, true), + getNarHash()->to_string(HashFormat::SRI, true)); return storePath; } diff --git a/src/libexpr/primops.cc b/src/libexpr/primops.cc index 3fb31fb1eb28..a997c96c760f 100644 --- a/src/libexpr/primops.cc +++ b/src/libexpr/primops.cc @@ 
-18,6 +18,9 @@ #include "nix/expr/primops.hh" #include "nix/fetchers/fetch-to-store.hh" #include "nix/util/sort.hh" +#include "nix/util/mounted-source-accessor.hh" +#include "nix/expr/provenance.hh" +#include "nix/util/override-provenance-source-accessor.hh" #include #include @@ -74,6 +77,7 @@ StringMap EvalState::realiseContext(const NixStringContext & context, StorePathS for (auto & c : context) { auto ensureValid = [&](const StorePath & p) { + waitForPath(p); if (!store->isValidPath(p)) error(p).debugThrow(); }; @@ -88,7 +92,10 @@ StringMap EvalState::realiseContext(const NixStringContext & context, StorePathS ensureValid(b.drvPath->getBaseStorePath()); }, [&](const NixStringContextElem::Opaque & o) { - ensureValid(o.path); + // We consider virtual store paths valid here. They'll + // be devirtualized if needed elsewhere. + if (!storeFS->getMount(CanonPath(store->printStorePath(o.path)))) + ensureValid(o.path); if (maybePathsOut) maybePathsOut->emplace(o.path); }, @@ -98,6 +105,9 @@ StringMap EvalState::realiseContext(const NixStringContext & context, StorePathS if (maybePathsOut) maybePathsOut->emplace(d.drvPath); }, + [&](const NixStringContextElem::Path & p) { + // FIXME: do something? + }, }, c.raw); } @@ -299,6 +309,7 @@ static void import(EvalState & state, const PosIdx pos, Value & vPath, Value * v if (!state.store->isStorePath(path2)) return std::nullopt; auto storePath = state.store->parseStorePath(path2); + state.waitForPath(storePath); if (!(state.store->isValidPath(storePath) && isDerivation(path2))) return std::nullopt; return storePath; @@ -1148,7 +1159,7 @@ static RegisterPrimOp primop_floor({ a NixInt and if `*number* < -9007199254740992` or `*number* > 9007199254740992`. If the datatype of *number* is neither a NixInt (signed 64-bit integer) nor a NixFloat - (IEEE-754 double-precision floating-point number), an evaluation error will be thrown. + (IEEE-754 double-precision floating-point number), an evaluation error is thrown. 
)", .impl = prim_floor, }); @@ -1195,7 +1206,7 @@ static RegisterPrimOp primop_tryEval({ `false` if an error was thrown) and `value`, equalling *e* if successful and `false` otherwise. `tryEval` only prevents errors created by `throw` or `assert` from being thrown. - Errors `tryEval` doesn't catch are, for example, those created + Errors that `tryEval` doesn't catch are, for example, those created by `abort` and type errors generated by builtins. Also note that this doesn't evaluate *e* deeply, so `let e = { x = throw ""; }; in (builtins.tryEval e).success` is `true`. Using @@ -1348,7 +1359,7 @@ static RegisterPrimOp primop_warn({ [`debugger-on-trace`](@docroot@/command-ref/conf-file.md#conf-debugger-on-trace) or [`debugger-on-warn`](@docroot@/command-ref/conf-file.md#conf-debugger-on-warn) option is set to `true` and the `--debugger` flag is given, the - interactive debugger will be started when `warn` is called (like + interactive debugger is started when `warn` is called (like [`break`](@docroot@/language/builtins.md#builtins-break)). If the @@ -1372,16 +1383,15 @@ static void prim_second(EvalState & state, const PosIdx pos, Value ** args, Valu * Derivations *************************************************************/ -static void derivationStrictInternal(EvalState & state, std::string_view name, const Bindings * attrs, Value & v); +static void derivationStrictInternal( + EvalState & state, + std::string_view name, + const Bindings * attrs, + Value & v, + std::shared_ptr provenance, + bool acceptMeta); -/* Construct (as a unobservable side effect) a Nix derivation - expression that performs the derivation described by the argument - set. Returns the original set extended with the following - attributes: `outPath' containing the primary output path of the - derivation; `drvPath' containing the path of the Nix expression; - and `type' set to `derivation' to indicate that this is a - derivation. 
*/ -static void prim_derivationStrict(EvalState & state, const PosIdx pos, Value ** args, Value & v) +static void prim_derivationStrictGeneric(EvalState & state, const PosIdx pos, Value ** args, Value & v, bool acceptMeta) { state.forceAttrs(*args[0], pos, "while evaluating the argument passed to builtins.derivationStrict"); @@ -1401,7 +1411,7 @@ static void prim_derivationStrict(EvalState & state, const PosIdx pos, Value ** } try { - derivationStrictInternal(state, drvName, attrs, v); + derivationStrictInternal(state, drvName, attrs, v, state.evalContext.provenance, acceptMeta); } catch (Error & e) { Pos pos = state.positions[nameAttr->pos]; /* @@ -1432,6 +1442,18 @@ static void prim_derivationStrict(EvalState & state, const PosIdx pos, Value ** } } +/* Construct a Nix derivation with metadata provenance */ +static RegisterPrimOp primop_derivationStrictWithMeta( + PrimOp{ + .name = "derivationStrictWithMeta", + .arity = 1, + .impl = + [](EvalState & state, const PosIdx pos, Value ** args, Value & v) { + prim_derivationStrictGeneric(state, pos, args, v, /*acceptMeta=*/true); + }, + .internal = true, + }); + /** * Early validation for the derivation name, for better error message. * It is checked again when constructing store paths. @@ -1455,7 +1477,13 @@ static void checkDerivationName(EvalState & state, std::string_view drvName) } } -static void derivationStrictInternal(EvalState & state, std::string_view drvName, const Bindings * attrs, Value & v) +static void derivationStrictInternal( + EvalState & state, + std::string_view drvName, + const Bindings * attrs, + Value & v, + std::shared_ptr provenance, + bool acceptMeta) { checkDerivationName(state, drvName); @@ -1580,7 +1608,19 @@ static void derivationStrictInternal(EvalState & state, std::string_view drvName the environment. 
*/ default: - if (jsonObject) { + if (acceptMeta && i->name == EvalState::s.__meta) { + if (experimentalFeatureSettings.isEnabled(Xp::Provenance)) { + state.forceAttrs(*i->value, pos, "while evaluating __meta"); + NixStringContext ctx; + auto obj = printValueAsJSON(state, true, *i->value, pos, ctx); + + if (!ctx.empty()) + throw Error("Derivation __meta provenance can't contain string context like store paths."); + + provenance = + std::make_shared(provenance, make_ref(obj)); + } + } else if (jsonObject) { if (i->name == state.s.structuredAttrs) continue; @@ -1704,6 +1744,10 @@ static void derivationStrictInternal(EvalState & state, std::string_view drvName /* Everything in the context of the strings in the derivation attributes should be added as dependencies of the resulting derivation. */ + StringMap rewrites; + + std::optional drvS; + for (auto & c : context) { std::visit( overloaded{ @@ -1715,6 +1759,8 @@ static void derivationStrictInternal(EvalState & state, std::string_view drvName [&](const NixStringContextElem::DrvDeep & d) { /* !!! This doesn't work if readOnlyMode is set. */ StorePathSet refs; + // FIXME: don't need to wait, we only need the references. 
+ state.waitForPath(d.drvPath); state.store->computeFSClosure(d.drvPath, refs); for (auto & j : refs) { drv.inputSrcs.insert(j); @@ -1726,11 +1772,27 @@ static void derivationStrictInternal(EvalState & state, std::string_view drvName [&](const NixStringContextElem::Built & b) { drv.inputDrvs.ensureSlot(*b.drvPath).value.insert(b.output); }, - [&](const NixStringContextElem::Opaque & o) { drv.inputSrcs.insert(o.path); }, + [&](const NixStringContextElem::Opaque & o) { + drv.inputSrcs.insert(state.devirtualize(o.path, &rewrites)); + }, + [&](const NixStringContextElem::Path & p) { + if (!drvS) + drvS = drv.unparse(*state.store, true); + if (drvS->find(p.storePath.to_string()) != drvS->npos) { + auto devirtualized = state.devirtualize(p.storePath, &rewrites); + warn( + "Using 'builtins.derivation' to create a derivation named '%s' that references the store path '%s' without a proper context. " + "The resulting derivation will not have a correct store reference, so this is unreliable and may stop working in the future.", + drvName, + state.store->printStorePath(devirtualized)); + } + }, }, c.raw); } + drv.applyRewrites(rewrites); + /* Do we have all required attributes? */ if (drv.builder == "") state.error("required attribute 'builder' missing").atPos(v).debugThrow(); @@ -1822,8 +1884,9 @@ static void derivationStrictInternal(EvalState & state, std::string_view drvName Unless we are in read-only mode, that is, in which case we do not write anything. Users commonly do this to speed up evaluation in contexts where they don't actually want to build anything. */ - auto drvPath = - settings.readOnlyMode ? computeStorePath(*state.store, drv) : state.store->writeDerivation(drv, state.repair); + auto drvPath = settings.readOnlyMode + ? 
computeStorePath(*state.store, drv) + : state.store->writeDerivation(*state.asyncPathWriter, drv, state.repair, provenance); auto drvPathS = state.store->printStorePath(drvPath); printMsg(lvlChatty, "instantiated '%1%' -> '%2%'", drvName, drvPathS); @@ -1850,11 +1913,21 @@ static void derivationStrictInternal(EvalState & state, std::string_view drvName v.mkAttrs(result); } +/* Construct (as a unobservable side effect) a Nix derivation + expression that performs the derivation described by the argument + set. Returns the original set extended with the following + attributes: `outPath' containing the primary output path of the + derivation; `drvPath' containing the path of the Nix expression; + and `type' set to `derivation' to indicate that this is a + derivation. */ static RegisterPrimOp primop_derivationStrict( PrimOp{ .name = "derivationStrict", .arity = 1, - .impl = prim_derivationStrict, + .impl = + [](EvalState & state, const PosIdx pos, Value ** args, Value & v) { + prim_derivationStrictGeneric(state, pos, args, v, /*acceptMeta=*/false); + }, }); /* Return a placeholder string for the specified output that will be @@ -2090,14 +2163,17 @@ static void prim_readFile(EvalState & state, const PosIdx pos, Value ** args, Va .debugThrow(); StorePathSet refs; if (state.store->isInStore(path.path.abs())) { - try { - refs = state.store->queryPathInfo(state.store->toStorePath(path.path.abs()).first)->references; - } catch (Error &) { // FIXME: should be InvalidPathError + auto storePath = state.store->toStorePath(path.path.abs()).first; + // Skip virtual paths since they don't have references and + // don't exist anyway. + if (!state.storeFS->getMount(CanonPath(state.store->printStorePath(storePath)))) { + if (auto info = state.store->maybeQueryPathInfo(state.store->toStorePath(path.path.abs()).first)) { + // Re-scan references to filter down to just the ones that actually occur in the file. 
+ auto refsSink = PathRefScanSink::fromPaths(info->references); + refsSink << s; + refs = refsSink.getResultPaths(); + } } - // Re-scan references to filter down to just the ones that actually occur in the file. - auto refsSink = PathRefScanSink::fromPaths(refs); - refsSink << s; - refs = refsSink.getResultPaths(); } NixStringContext context; for (auto && p : std::move(refs)) { @@ -2333,6 +2409,23 @@ static RegisterPrimOp primop_hashFile({ .impl = prim_hashFile, }); +static RegisterPrimOp primop_narHash({ + .name = "__narHash", + .args = {"p"}, + .doc = R"( + Return an SRI representation of the SHA-256 hash of the NAR serialisation of the path *p*. + )", + .impl = + [](EvalState & state, const PosIdx pos, Value ** args, Value & v) { + auto path = state.realisePath(pos, *args[0]); + auto hash = + fetchToStore2(state.fetchSettings, *state.store, path.resolveSymlinks(), FetchMode::DryRun).second; + v.mkString(hash.to_string(HashFormat::SRI, true), state.mem); + }, + // FIXME: may be useful to expose to the user. 
+ .internal = true, +}); + static const Value & fileTypeToString(EvalState & state, SourceAccessor::Type type) { struct Constants @@ -2660,15 +2753,25 @@ static void prim_toFile(EvalState & state, const PosIdx pos, Value ** args, Valu { NixStringContext context; auto name = state.forceStringNoCtx(*args[0], pos, "while evaluating the first argument passed to builtins.toFile"); - auto contents = - state.forceString(*args[1], context, pos, "while evaluating the second argument passed to builtins.toFile"); + std::string contents( + state.forceString(*args[1], context, pos, "while evaluating the second argument passed to builtins.toFile")); StorePathSet refs; + StringMap rewrites; for (auto c : context) { if (auto p = std::get_if(&c.raw)) refs.insert(p->path); - else + else if (auto p = std::get_if(&c.raw)) { + if (contents.find(p->storePath.to_string()) != contents.npos) { + auto devirtualized = state.devirtualize(p->storePath, &rewrites); + warn( + "Using 'builtins.toFile' to create a file named '%s' that references the store path '%s' without a proper context. " + "The resulting file will not have a correct store reference, so this is unreliable and may stop working in the future.", + name, + state.store->printStorePath(devirtualized)); + } + } else state .error( "files created by %1% may not reference derivations, but %2% references %3%", @@ -2679,6 +2782,8 @@ static void prim_toFile(EvalState & state, const PosIdx pos, Value ** args, Valu .debugThrow(); } + contents = rewriteStrings(contents, rewrites); + auto storePath = settings.readOnlyMode ? 
state.store->makeFixedOutputPathFromCA( name, TextInfo{ @@ -2694,7 +2799,8 @@ static void prim_toFile(EvalState & state, const PosIdx pos, Value ** args, Valu ContentAddressMethod::Raw::Text, HashAlgorithm::SHA256, refs, - state.repair); + state.repair, + state.evalContext.provenance); }); /* Note: we don't need to add `context' to the context of the @@ -2838,11 +2944,23 @@ static void addPath( name, ContentAddressWithReferences::fromParts(method, *expectedHash, {refs})); if (!expectedHash || !state.store->isValidPath(*expectedStorePath)) { + // FIXME: make this lazy? // FIXME: support refs in fetchToStore()? + auto path2 = path.resolveSymlinks(); + // Don't use source path provenance if we have a filter applied, since we can't accurately + // record that. Instead, use the current global provenance, since it's better than nothing. + auto path3 = filter + ? SourcePath{ + make_ref( + path2.accessor, state.evalContext.provenance), + path2.path + } + : path2; + auto dstPath = refs.empty() ? fetchToStore( state.fetchSettings, *state.store, - path.resolveSymlinks(), + path3, settings.readOnlyMode ? 
FetchMode::DryRun : FetchMode::Copy, name, method, @@ -2850,7 +2968,7 @@ static void addPath( state.repair) : state.store->addToStore( name, - path.resolveSymlinks(), + path3, method, HashAlgorithm::SHA256, refs, @@ -2880,7 +2998,15 @@ static void prim_filterSource(EvalState & state, const PosIdx pos, Value ** args state.forceFunction(*args[0], pos, "while evaluating the first argument passed to builtins.filterSource"); addPath( - state, pos, path.baseName(), path, args[0], ContentAddressMethod::Raw::NixArchive, std::nullopt, v, context); + state, + pos, + state.computeBaseName(path, pos), + path, + args[0], + ContentAddressMethod::Raw::NixArchive, + std::nullopt, + v, + context); } static RegisterPrimOp primop_filterSource({ @@ -3536,6 +3662,49 @@ static RegisterPrimOp primop_mapAttrs({ .impl = prim_mapAttrs, }); +static void prim_filterAttrs(EvalState & state, const PosIdx pos, Value ** args, Value & v) +{ + state.forceAttrs(*args[1], pos, "while evaluating the second argument passed to builtins.filterAttrs"); + + if (args[1]->attrs()->empty()) { + v = *args[1]; + return; + } + + state.forceFunction(*args[0], pos, "while evaluating the first argument passed to builtins.filterAttrs"); + + auto attrs = state.buildBindings(args[1]->attrs()->size()); + + for (auto & i : *args[1]->attrs()) { + Value * vName = Value::toPtr(state.symbols[i.name]); + Value * callArgs[] = {vName, i.value}; + Value res; + state.callFunction(*args[0], callArgs, res, noPos); + if (state.forceBool( + res, pos, "while evaluating the return value of the filtering function passed to builtins.filterAttrs")) + attrs.insert(i.name, i.value); + } + + v.mkAttrs(attrs.alreadySorted()); +} + +static RegisterPrimOp primop_filterAttrs({ + .name = "__filterAttrs", + .args = {"f", "attrset"}, + .doc = R"( + Return an attribute set consisting of the attributes in *attrset* for which + the function *f* returns `true`. 
The function *f* is called with two arguments: + the name of the attribute and the value of the attribute. For example, + + ```nix + builtins.filterAttrs (name: value: name == "foo") { foo = 1; bar = 2; } + ``` + + evaluates to `{ foo = 1; }`. + )", + .impl = prim_filterAttrs, +}); + static void prim_zipAttrsWith(EvalState & state, const PosIdx pos, Value ** args, Value & v) { // we will first count how many values are present for each given key. @@ -3916,8 +4085,8 @@ static void anyOrAll(bool any, EvalState & state, const PosIdx pos, Value ** arg std::string_view errorCtx = any ? "while evaluating the return value of the function passed to builtins.any" : "while evaluating the return value of the function passed to builtins.all"; - Value vTmp; for (auto elem : args[1]->listView()) { + Value vTmp; state.callFunction(*args[0], *elem, vTmp, pos); bool res = state.forceBool(vTmp, pos, errorCtx); if (res == any) { @@ -4599,8 +4768,9 @@ static void prim_hashString(EvalState & state, const PosIdx pos, Value ** args, state.error("unknown hash algorithm '%1%'", algo).atPos(pos).debugThrow(); NixStringContext context; // discarded - auto s = - state.forceString(*args[1], context, pos, "while evaluating the second argument passed to builtins.hashString"); + auto s = state.devirtualize( + state.forceString(*args[1], context, pos, "while evaluating the second argument passed to builtins.hashString"), + context); v.mkString(hashString(*ha, s).to_string(HashFormat::Base16, false), state.mem); } @@ -5229,9 +5399,7 @@ void EvalState::createBaseEnv(const EvalSettings & evalSettings) )", }); - if (!settings.pureEval) { - v.mkInt(time(nullptr)); - } + v.mkInt(time(nullptr)); addConstant( "__currentTime", v, @@ -5259,8 +5427,7 @@ void EvalState::createBaseEnv(const EvalSettings & evalSettings) .impureOnly = true, }); - if (!settings.pureEval) - v.mkString(settings.getCurrentSystem(), mem); + v.mkString(settings.getCurrentSystem(), mem); addConstant( "__currentSystem", v, @@ -5446,6 
+5613,16 @@ void EvalState::createBaseEnv(const EvalSettings & evalSettings) .type = nFunction, }); + auto vDerivationWithMeta = allocValue(); + if (experimentalFeatureSettings.isEnabled(Xp::Provenance)) { + addConstant( + "derivationWithMeta", + vDerivationWithMeta, + { + .type = nFunction, + }); + } + /* Now that we've added all primops, sort the `builtins' set, because attribute lookups expect it to be sorted. */ const_cast(getBuiltins().attrs())->sort(); @@ -5454,7 +5631,14 @@ void EvalState::createBaseEnv(const EvalSettings & evalSettings) /* Note: we have to initialize the 'derivation' constant *after* building baseEnv/staticBaseEnv because it uses 'builtins'. */ - evalFile(derivationInternal, *vDerivation); + auto vDerivationValue = allocValue(); + evalFile(derivationInternal, *vDerivationValue); + + callFunction(*vDerivationValue, getBuiltin("derivationStrict"), *vDerivation, PosIdx()); + + if (experimentalFeatureSettings.isEnabled(Xp::Provenance)) + callFunction( + *vDerivationValue, **get(internalPrimOps, "derivationStrictWithMeta"), *vDerivationWithMeta, PosIdx()); } } // namespace nix diff --git a/src/libexpr/primops/context.cc b/src/libexpr/primops/context.cc index d5d5de0b9aaf..569e8a924d23 100644 --- a/src/libexpr/primops/context.cc +++ b/src/libexpr/primops/context.cc @@ -8,10 +8,16 @@ namespace nix { static void prim_unsafeDiscardStringContext(EvalState & state, const PosIdx pos, Value ** args, Value & v) { - NixStringContext context; + NixStringContext context, filtered; + auto s = state.coerceToString( pos, *args[0], context, "while evaluating the argument passed to builtins.unsafeDiscardStringContext"); - v.mkString(*s, state.mem); + + for (auto & c : context) + if (auto * p = std::get_if(&c.raw)) + filtered.insert(*p); + + v.mkString(*s, filtered, state.mem); } static RegisterPrimOp primop_unsafeDiscardStringContext({ @@ -23,11 +29,19 @@ static RegisterPrimOp primop_unsafeDiscardStringContext({ .impl = prim_unsafeDiscardStringContext, }); +bool 
hasContext(const NixStringContext & context) +{ + for (auto & c : context) + if (!std::get_if(&c.raw)) + return true; + return false; +} + static void prim_hasContext(EvalState & state, const PosIdx pos, Value ** args, Value & v) { NixStringContext context; state.forceString(*args[0], context, pos, "while evaluating the argument passed to builtins.hasContext"); - v.mkBool(!context.empty()); + v.mkBool(hasContext(context)); } static RegisterPrimOp primop_hasContext( @@ -62,6 +76,7 @@ static void prim_unsafeDiscardOutputDependency(EvalState & state, const PosIdx p NixStringContext context2; for (auto && c : context) { if (auto * ptr = std::get_if(&c.raw)) { + state.waitForPath(ptr->drvPath); // FIXME: why? context2.emplace(NixStringContextElem::Opaque{.path = ptr->drvPath}); } else { /* Can reuse original item */ @@ -133,6 +148,11 @@ static void prim_addDrvOutputDependencies(EvalState & state, const PosIdx pos, V above does not make much sense. */ return std::move(c); }, + [&](const NixStringContextElem::Path & p) -> NixStringContextElem::DrvDeep { + state.error("`addDrvOutputDependencies` does not work on a string without context") + .atPos(pos) + .debugThrow(); + }, }, context.begin()->raw)}), }; @@ -201,6 +221,7 @@ static void prim_getContext(EvalState & state, const PosIdx pos, Value ** args, contextInfos[std::move(drvPath)].outputs.emplace_back(std::move(b.output)); }, [&](NixStringContextElem::Opaque && o) { contextInfos[std::move(o.path)].path = true; }, + [&](NixStringContextElem::Path && p) {}, }, ((NixStringContextElem &&) i).raw); } diff --git a/src/libexpr/primops/derivation.nix b/src/libexpr/primops/derivation.nix index dbb8c2186889..d3b341a23713 100644 --- a/src/libexpr/primops/derivation.nix +++ b/src/libexpr/primops/derivation.nix @@ -26,6 +26,7 @@ Note that `derivation` is very bare-bones, and provides almost no commands during the build. Most likely, you'll want to use functions like `stdenv.mkDerivation` in Nixpkgs to set up a basic environment. 
*/ +drvFunc: drvAttrs@{ outputs ? [ "out" ], ... @@ -33,7 +34,7 @@ drvAttrs@{ let - strict = derivationStrict drvAttrs; + strict = drvFunc drvAttrs; commonAttrs = drvAttrs diff --git a/src/libexpr/primops/fetchClosure.cc b/src/libexpr/primops/fetchClosure.cc index 2e01d67172d3..0f7eafefe9e6 100644 --- a/src/libexpr/primops/fetchClosure.cc +++ b/src/libexpr/primops/fetchClosure.cc @@ -136,7 +136,7 @@ static void prim_fetchClosure(EvalState & state, const PosIdx pos, Value ** args std::optional inputAddressedMaybe; for (auto & attr : *args[0]->attrs()) { - const auto & attrName = state.symbols[attr.name]; + std::string_view attrName = state.symbols[attr.name]; auto attrHint = [&]() -> std::string { return fmt("while evaluating the attribute '%s' passed to builtins.fetchClosure", attrName); }; diff --git a/src/libexpr/primops/fetchMercurial.cc b/src/libexpr/primops/fetchMercurial.cc index 9347645cc204..59ffeabf1ef8 100644 --- a/src/libexpr/primops/fetchMercurial.cc +++ b/src/libexpr/primops/fetchMercurial.cc @@ -81,7 +81,7 @@ static void prim_fetchMercurial(EvalState & state, const PosIdx pos, Value ** ar attrs.insert_or_assign("rev", rev->gitRev()); auto input = fetchers::Input::fromAttrs(state.fetchSettings, std::move(attrs)); - auto [storePath, input2] = input.fetchToStore(state.fetchSettings, *state.store); + auto [storePath, accessor, input2] = input.fetchToStore(state.fetchSettings, *state.store); auto attrs2 = state.buildBindings(8); state.mkStorePathString(storePath, attrs2.alloc(state.s.outPath)); diff --git a/src/libexpr/primops/fetchTree.cc b/src/libexpr/primops/fetchTree.cc index 6e06bdbe973f..7b36809903d0 100644 --- a/src/libexpr/primops/fetchTree.cc +++ b/src/libexpr/primops/fetchTree.cc @@ -30,12 +30,16 @@ void emitTreeAttrs( { auto attrs = state.buildBindings(100); - state.mkStorePathString(storePath, attrs.alloc(state.s.outPath)); + auto & vStorePath = attrs.alloc(state.s.outPath); + state.mkStorePathString(storePath, vStorePath); // FIXME: support 
arbitrary input attributes. if (auto narHash = input.getNarHash()) attrs.alloc("narHash").mkString(narHash->to_string(HashFormat::SRI, true), state.mem); + else + // Lazily compute the NAR hash for backward compatibility. + attrs.alloc("narHash").mkApp(*get(state.internalPrimOps, "narHash"), &vStorePath); if (input.getType() == "git") attrs.alloc("submodules").mkBool(fetchers::maybeGetBoolAttr(input.attrs, "submodules").value_or(false)); @@ -77,7 +81,6 @@ struct FetchTreeParams bool emptyRevFallback = false; bool allowNameArgument = false; bool isFetchGit = false; - bool isFinal = false; }; static void fetchTree( @@ -151,11 +154,6 @@ static void fetchTree( attrs.emplace("exportIgnore", Explicit{true}); } - // fetchTree should fetch git repos with shallow = true by default - if (type == "git" && !params.isFetchGit && !attrs.contains("shallow")) { - attrs.emplace("shallow", Explicit{true}); - } - if (!params.allowNameArgument) if (auto nameIter = attrs.find("name"); nameIter != attrs.end()) state.error("argument 'name' isn’t supported in call to '%s'", fetcher) @@ -184,17 +182,11 @@ static void fetchTree( } input = fetchers::Input::fromAttrs(state.fetchSettings, std::move(attrs)); } else { - if (!experimentalFeatureSettings.isEnabled(Xp::Flakes)) - state - .error( - "passing a string argument to '%s' requires the 'flakes' experimental feature", fetcher) - .atPos(pos) - .debugThrow(); input = fetchers::Input::fromURL(state.fetchSettings, url); } } - if (!state.settings.pureEval && !input.isDirect() && experimentalFeatureSettings.isEnabled(Xp::Flakes)) + if (!state.settings.pureEval && !input.isDirect()) input = lookupInRegistries(state.fetchSettings, *state.store, input, fetchers::UseRegistries::Limited).first; if (state.settings.pureEval && !input.isLocked(state.fetchSettings)) { @@ -213,17 +205,13 @@ static void fetchTree( state.checkURI(input.toURLString()); - if (params.isFinal) { + if (input.getNarHash()) input.attrs.insert_or_assign("__final", Explicit(true)); - 
} else { - if (input.isFinal()) - throw Error("input '%s' is not allowed to use the '__final' attribute", input.to_string()); - } auto cachedInput = state.inputCache->getAccessor(state.fetchSettings, *state.store, input, fetchers::UseRegistries::No); - auto storePath = state.mountInput(cachedInput.lockedInput, input, cachedInput.accessor); + auto storePath = state.mountInput(cachedInput.lockedInput, input, cachedInput.accessor, true); emitTreeAttrs(state, storePath, cachedInput.lockedInput, v, params.emptyRevFallback, false); } @@ -318,7 +306,6 @@ static RegisterPrimOp primop_fetchTree({ - `"mercurial"` *input* can also be a [URL-like reference](@docroot@/command-ref/new-cli/nix3-flake.md#flake-references). - The additional input types and the URL-like syntax requires the [`flakes` experimental feature](@docroot@/development/experimental-features.md#xp-feature-flakes) to be enabled. > **Example** > @@ -358,19 +345,6 @@ static RegisterPrimOp primop_fetchTree({ return doc; }(), .impl = prim_fetchTree, - .experimentalFeature = Xp::FetchTree, -}); - -void prim_fetchFinalTree(EvalState & state, const PosIdx pos, Value ** args, Value & v) -{ - fetchTree(state, pos, args, v, {.isFinal = true}); -} - -static RegisterPrimOp primop_fetchFinalTree({ - .name = "fetchFinalTree", - .args = {"input"}, - .impl = prim_fetchFinalTree, - .internal = true, }); static void fetch( @@ -719,7 +693,7 @@ static RegisterPrimOp primop_fetchGit({ name in the `ref` attribute. However, if the revision you're looking for is in a future - branch for the non-default branch you will need to specify the + branch for the non-default branch you need to specify the the `ref` attribute as well. 
```nix diff --git a/src/libexpr/primops/meson.build b/src/libexpr/primops/meson.build index b8abc6409af9..d62b6df4ea20 100644 --- a/src/libexpr/primops/meson.build +++ b/src/libexpr/primops/meson.build @@ -10,3 +10,7 @@ sources += files( 'fetchTree.cc', 'fromTOML.cc', ) + +if wasmtime_required.enabled() + sources += files('wasm.cc') +endif diff --git a/src/libexpr/primops/wasm.cc b/src/libexpr/primops/wasm.cc new file mode 100644 index 000000000000..05b8a3ca38d9 --- /dev/null +++ b/src/libexpr/primops/wasm.cc @@ -0,0 +1,741 @@ +#include "nix/expr/primops.hh" +#include "nix/expr/eval-inline.hh" + +#include +#include + +using namespace wasmtime; + +namespace nix { + +using ValueId = uint32_t; + +template +T unwrap(Result && res) +{ + if (res) + return res.ok(); + throw Error(res.err().message()); +} + +static Engine & getEngine() +{ + static Engine engine = []() { + wasmtime::Config config; + config.pooling_allocation_strategy(PoolAllocationConfig()); + config.memory_init_cow(true); + return Engine(std::move(config)); + }(); + return engine; +} + +static std::span string2span(std::string_view s) +{ + return std::span((uint8_t *) s.data(), s.size()); +} + +static std::string_view span2string(std::span s) +{ + return std::string_view((char *) s.data(), s.size()); +} + +template +static std::span subspan(std::span s, size_t len) +{ + if (s.size() < len * sizeof(T)) + throw Error("Wasm memory access out of bounds"); + return std::span((T *) s.data(), len); +} + +// FIXME: move to wasmtime C++ wrapper. 
+class InstancePre +{ + WASMTIME_OWN_WRAPPER(InstancePre, wasmtime_instance_pre); + +public: + TrapResult instantiate(wasmtime::Store::Context cx) + { + wasmtime_instance_t instance; + wasm_trap_t * trap = nullptr; + auto * error = wasmtime_instance_pre_instantiate(ptr.get(), cx.capi(), &instance, &trap); + if (error != nullptr) { + return TrapError(wasmtime::Error(error)); + } + if (trap != nullptr) { + return TrapError(Trap(trap)); + } + return Instance(instance); + } +}; + +TrapResult instantiate_pre(Linker & linker, const Module & m) +{ + wasmtime_instance_pre_t * instance_pre; + auto * error = wasmtime_linker_instantiate_pre(linker.capi(), m.capi(), &instance_pre); + if (error != nullptr) { + return TrapError(wasmtime::Error(error)); + } + return InstancePre(instance_pre); +} + +static void regFuns(Linker & linker, bool useWasi); + +struct NixWasmInstancePre +{ + Engine & engine = getEngine(); + std::string name; + bool useWasi = false; + InstancePre instancePre; + + InstancePre compile(std::span bytes) + { + // Compile the module + auto module = unwrap(Module::compile(engine, bytes)); + + // Auto-detect WASI by checking for wasi_snapshot_preview1 imports. 
+ for (const auto & ref : module.imports()) + if (const_cast &>(ref).module() == "wasi_snapshot_preview1") { + useWasi = true; + break; + } + + // Create linker with appropriate WASI support + Linker linker(engine); + if (useWasi) + unwrap(linker.define_wasi()); + regFuns(linker, useWasi); + + return unwrap(instantiate_pre(linker, module)); + } + + NixWasmInstancePre(SourcePath wasmPath) + : name(wasmPath.baseName()) + , instancePre(compile(string2span(wasmPath.readFile()))) + { + } + + NixWasmInstancePre(std::string_view wat) + : name("") + , instancePre([&] { + auto wasm = unwrap(wat2wasm(wat)); + return compile(std::span(wasm)); + }()) + { + } +}; + +struct NixWasmInstance +{ + EvalState & state; + ref pre; + wasmtime::Store wasmStore; + wasmtime::Store::Context wasmCtx; + Instance instance; + Memory memory_; + + ValueVector values; + std::exception_ptr ex; + + std::optional functionName; + + ValueId resultId = 0; + + std::string logPrefix; + + NixWasmInstance(EvalState & _state, ref _pre) + : state(_state) + , pre(_pre) + , wasmStore(pre->engine) + , wasmCtx(wasmStore) + , instance(unwrap(pre->instancePre.instantiate(wasmCtx))) + , memory_(getExport("memory")) + , logPrefix(pre->name) + { + wasmCtx.set_data(this); + + /* Reserve value ID 0 so it can be used in functions like get_attr() to denote a missing attribute. 
*/ + values.push_back(nullptr); + } + + ValueId addValue(Value * v) + { + auto id = values.size(); + values.emplace_back(v); + return id; + } + + std::pair allocValue() + { + auto v = state.allocValue(); + auto id = addValue(v); + return {id, *v}; + } + + Value & getValue(ValueId id) + { + if (id >= values.size() || id == 0) + throw Error("invalid ValueId %d", id); + return *values[id]; + } + + template + T getExport(std::string_view name) + { + auto ext = instance.get(wasmCtx, name); + if (!ext) + throw Error("Wasm module '%s' does not export '%s'", pre->name, name); + auto res = std::get_if(&*ext); + if (!res) + throw Error("export '%s' of Wasm module '%s' does not have the right type", name, pre->name); + return *res; + } + + std::vector runFunction(std::string_view name, const std::vector & args) + { + functionName = name; + return unwrap(getExport(name).call(wasmCtx, args)); + } + + auto memory() + { + return memory_.data(wasmCtx); + } + + std::monostate panic(uint32_t ptr, uint32_t len) + { + throw Error("Wasm panic: %s", Uncolored(span2string(memory().subspan(ptr, len)))); + } + + std::monostate warn(uint32_t ptr, uint32_t len) + { + doWarn(span2string(memory().subspan(ptr, len))); + return {}; + } + + void doWarn(std::string_view s) + { + if (functionName) + nix::warn("'%s' function '%s': %s", logPrefix, functionName.value_or(""), s); + else + nix::warn("'%s': %s", logPrefix, s); + } + + uint32_t get_type(ValueId valueId) + { + auto & value = getValue(valueId); + state.forceValue(value, noPos); + auto t = value.type(); + return t == nInt ? 1 + : t == nFloat ? 2 + : t == nBool ? 3 + : t == nString ? 4 + : t == nPath ? 5 + : t == nNull ? 6 + : t == nAttrs ? 7 + : t == nList ? 8 + : t == nFunction ? 
9 + : []() -> int { throw Error("unsupported type"); }(); + } + + ValueId make_int(int64_t n) + { + auto [valueId, value] = allocValue(); + value.mkInt(n); + return valueId; + } + + int64_t get_int(ValueId valueId) + { + return state.forceInt(getValue(valueId), noPos, "while evaluating a value from Wasm").value; + } + + ValueId make_float(double x) + { + auto [valueId, value] = allocValue(); + value.mkFloat(x); + return valueId; + } + + double get_float(ValueId valueId) + { + return state.forceFloat(getValue(valueId), noPos, "while evaluating a value from Wasm"); + } + + ValueId make_string(uint32_t ptr, uint32_t len) + { + auto [valueId, value] = allocValue(); + value.mkString(span2string(memory().subspan(ptr, len)), state.mem); + return valueId; + } + + uint32_t copy_string(ValueId valueId, uint32_t ptr, uint32_t maxLen) + { + auto s = state.forceString(getValue(valueId), noPos, "while evaluating a value from Wasm"); + if (s.size() <= maxLen) { + auto buf = memory().subspan(ptr, maxLen); + memcpy(buf.data(), s.data(), s.size()); + } + return s.size(); + } + + ValueId make_path(ValueId baseId, uint32_t ptr, uint32_t len) + { + auto & baseValue = getValue(baseId); + state.forceValue(baseValue, noPos); + if (baseValue.type() != nPath) + throw Error("make_path expects a path value"); + auto base = baseValue.path(); + + auto [valueId, value] = allocValue(); + value.mkPath({base.accessor, CanonPath(span2string(memory().subspan(ptr, len)), base.path)}, state.mem); + return valueId; + } + + uint32_t copy_path(ValueId valueId, uint32_t ptr, uint32_t maxLen) + { + auto & v = getValue(valueId); + state.forceValue(v, noPos); + if (v.type() != nPath) + throw Error("copy_path expects a path value"); + auto path = v.path().path; + auto s = path.abs(); + if (s.size() <= maxLen) { + auto buf = memory().subspan(ptr, maxLen); + memcpy(buf.data(), s.data(), s.size()); + } + return s.size(); + } + + ValueId make_bool(int32_t b) + { + return addValue(state.getBool(b)); + } + + int32_t 
get_bool(ValueId valueId) + { + return state.forceBool(getValue(valueId), noPos, "while evaluating a value from Wasm"); + } + + ValueId make_null() + { + return addValue(&Value::vNull); + } + + ValueId make_list(uint32_t ptr, uint32_t len) + { + auto vs = subspan(memory().subspan(ptr), len); + + auto [valueId, value] = allocValue(); + + auto list = state.buildList(len); + for (const auto & [n, v] : enumerate(list)) + v = &getValue(vs[n]); // FIXME: endianness + value.mkList(list); + + return valueId; + } + + uint32_t copy_list(ValueId valueId, uint32_t ptr, uint32_t maxLen) + { + auto & value = getValue(valueId); + state.forceList(value, noPos, "while getting a list from Wasm"); + + if (value.listSize() <= maxLen) { + auto out = subspan(memory().subspan(ptr), value.listSize()); + + for (const auto & [n, elem] : enumerate(value.listView())) + out[n] = addValue(elem); + } + + return value.listSize(); + } + + ValueId make_attrset(uint32_t ptr, uint32_t len) + { + auto mem = memory(); + + struct Attr + { + // FIXME: endianness + uint32_t attrNamePtr; + uint32_t attrNameLen; + ValueId value; + }; + + auto attrs = subspan(mem.subspan(ptr), len); + + auto [valueId, value] = allocValue(); + auto builder = state.buildBindings(len); + for (auto & attr : attrs) + builder.insert( + state.symbols.create(span2string(mem.subspan(attr.attrNamePtr, attr.attrNameLen))), + &getValue(attr.value)); + value.mkAttrs(builder); + + return valueId; + } + + uint32_t copy_attrset(ValueId valueId, uint32_t ptr, uint32_t maxLen) + { + auto & value = getValue(valueId); + state.forceAttrs(value, noPos, "while copying an attrset into Wasm"); + + if (value.attrs()->size() <= maxLen) { + // FIXME: endianness. + struct Attr + { + ValueId value; + uint32_t nameLen; + }; + + auto buf = subspan(memory().subspan(ptr), maxLen); + + // FIXME: for determinism, we should return attributes in lexicographically sorted order. 
+ for (const auto & [n, attr] : enumerate(*value.attrs())) { + buf[n].value = addValue(attr.value); + buf[n].nameLen = state.symbols[attr.name].size(); + } + } + + return value.attrs()->size(); + } + + std::monostate copy_attrname(ValueId valueId, uint32_t attrIdx, uint32_t ptr, uint32_t len) + { + auto & value = getValue(valueId); + state.forceAttrs(value, noPos, "while copying an attr name into Wasm"); + + auto & attrs = *value.attrs(); + + if ((size_t) attrIdx >= attrs.size()) + throw Error("copy_attrname: attribute index out of bounds"); + + std::string_view name = state.symbols[attrs[attrIdx].name]; + + if ((size_t) len != name.size()) + throw Error("copy_attrname: buffer length does not match attribute name length"); + + memcpy(memory().subspan(ptr, len).data(), name.data(), name.size()); + + return {}; + } + + ValueId get_attr(ValueId valueId, uint32_t ptr, uint32_t len) + { + auto attrName = span2string(memory().subspan(ptr, len)); + + auto & value = getValue(valueId); + state.forceAttrs(value, noPos, "while getting an attribute from Wasm"); + + auto attr = value.attrs()->get(state.symbols.create(attrName)); + + return attr ? 
addValue(attr->value) : 0; + } + + ValueId call_function(ValueId funId, uint32_t ptr, uint32_t len) + { + auto & fun = getValue(funId); + state.forceFunction(fun, noPos, "while calling a function from Wasm"); + + ValueVector args; + for (auto argId : subspan(memory().subspan(ptr), len)) + args.push_back(&getValue(argId)); + + auto [valueId, value] = allocValue(); + + state.callFunction(fun, args, value, noPos); + + return valueId; + } + + ValueId make_app(ValueId funId, uint32_t ptr, uint32_t len) + { + if (!len) + return funId; + + auto args = subspan(memory().subspan(ptr), len); + + auto res = &getValue(funId); + + while (!args.empty()) { + auto arg = &getValue(args[0]); + auto tmp = state.allocValue(); + tmp->mkApp(res, {arg}); + res = tmp; + args = args.subspan(1); + } + + return addValue(res); + } + + /** + * Read the contents of a file into Wasm memory. This is like calling `builtins.readFile`, except that it can handle + * binary files that cannot be represented as Nix strings. + */ + uint32_t read_file(ValueId pathId, uint32_t ptr, uint32_t len) + { + auto & pathValue = getValue(pathId); + auto path = state.realisePath(noPos, pathValue); + + auto contents = path.readFile(); + + if (contents.size() > std::numeric_limits::max()) + throw Error("file '%s' is too large to process in Wasm (size: %d)", path, contents.size()); + + // FIXME: this is an inefficient interface since it may cause the file to be read twice. + if (contents.size() <= len) { + auto buf = memory().subspan(ptr, len); + memcpy(buf.data(), contents.data(), contents.size()); + } + + return contents.size(); + } +}; + +template +static void regFun(Linker & linker, std::string_view name, R (NixWasmInstance::*f)(Args...)) +{ + unwrap(linker.func_wrap("env", name, [f](Caller caller, Args... args) -> Result { + try { + auto instance = std::any_cast(caller.context().get_data()); + return (*instance.*f)(args...); + } catch (std::exception & e) { + return Trap(e.what()); + } catch (...) 
{ + return Trap("unknown exception"); + } + })); +} + +static void regFuns(Linker & linker, bool useWasi) +{ + regFun(linker, "panic", &NixWasmInstance::panic); + regFun(linker, "warn", &NixWasmInstance::warn); + regFun(linker, "get_type", &NixWasmInstance::get_type); + regFun(linker, "make_int", &NixWasmInstance::make_int); + regFun(linker, "get_int", &NixWasmInstance::get_int); + regFun(linker, "make_float", &NixWasmInstance::make_float); + regFun(linker, "get_float", &NixWasmInstance::get_float); + regFun(linker, "make_string", &NixWasmInstance::make_string); + regFun(linker, "copy_string", &NixWasmInstance::copy_string); + regFun(linker, "make_path", &NixWasmInstance::make_path); + regFun(linker, "copy_path", &NixWasmInstance::copy_path); + regFun(linker, "make_bool", &NixWasmInstance::make_bool); + regFun(linker, "get_bool", &NixWasmInstance::get_bool); + regFun(linker, "make_null", &NixWasmInstance::make_null); + regFun(linker, "make_list", &NixWasmInstance::make_list); + regFun(linker, "copy_list", &NixWasmInstance::copy_list); + regFun(linker, "make_attrset", &NixWasmInstance::make_attrset); + regFun(linker, "copy_attrset", &NixWasmInstance::copy_attrset); + regFun(linker, "copy_attrname", &NixWasmInstance::copy_attrname); + regFun(linker, "get_attr", &NixWasmInstance::get_attr); + regFun(linker, "call_function", &NixWasmInstance::call_function); + regFun(linker, "make_app", &NixWasmInstance::make_app); + regFun(linker, "read_file", &NixWasmInstance::read_file); + + if (useWasi) { + unwrap(linker.func_wrap( + "env", "return_to_nix", [](Caller caller, ValueId resultId) -> Result { + auto instance = std::any_cast(caller.context().get_data()); + instance->resultId = resultId; + return Trap("return_to_nix"); + })); + } +} + +template +struct LazyMakeRef +{ + ref p; + + template + LazyMakeRef(Args &&... 
args) + : p(make_ref(std::move(args...))) + { + } +}; + +static NixWasmInstance instantiateWasm(EvalState & state, const SourcePath & wasmPath) +{ + // FIXME: make this a weak Boehm GC pointer so that it can be freed during GC. + // FIXME: move to EvalState? + // Note: InstancePre in Rust is Send+Sync so it should be safe to share between threads. + static boost::concurrent_flat_map> instancesPre; + + std::shared_ptr instancePre; + + instancesPre.try_emplace_and_cvisit( + wasmPath, wasmPath, [&](auto & i) { instancePre = i.second.p; }, [&](auto & i) { instancePre = i.second.p; }); + + return NixWasmInstance{state, ref(instancePre)}; +} + +/** + * Callback for WASI stdout/stderr writes. It splits the output into lines and logs each line separately. + */ +struct WasiLogger +{ + NixWasmInstance & instance; + + std::string data; + + ~WasiLogger() + { + if (!data.empty()) + instance.doWarn(data); + } + + void operator()(std::string_view s) + { + data.append(s); + + while (true) { + auto pos = data.find('\n'); + if (pos == std::string_view::npos) + break; + instance.doWarn(data.substr(0, pos)); + data.erase(0, pos + 1); + } + } +}; + +static void prim_wasm(EvalState & state, const PosIdx pos, Value ** args, Value & v) +{ + state.forceAttrs(*args[0], pos, "while evaluating the first argument to `builtins.wasm`"); + + // Check for unknown attributes + for (auto & attr : *args[0]->attrs()) { + auto name = state.symbols[attr.name]; + if (name != "path" && name != "wat" && name != "function") + throw Error("unknown attribute '%s' in first argument to `builtins.wasm`", name); + } + + auto pathAttr = args[0]->attrs()->get(state.symbols.create("path")); + auto watAttr = args[0]->attrs()->get(state.symbols.create("wat")); + + if (pathAttr && watAttr) + throw Error("'path' and 'wat' are mutually exclusive in first argument to `builtins.wasm`"); + if (!pathAttr && !watAttr) + throw Error("missing required 'path' or 'wat' attribute in first argument to `builtins.wasm`"); + + // 
Second argument is the value to pass to the function + auto argValue = args[1]; + + try { + auto instance = pathAttr ? instantiateWasm(state, state.realisePath(pos, *pathAttr->value)) + : NixWasmInstance{ + state, + make_ref(state.forceStringNoCtx( + *watAttr->value, pos, "while evaluating the 'wat' attribute"))}; + + // Extract 'function' attribute (optional for wasi, required for non-wasi) + std::string functionName; + auto functionAttr = args[0]->attrs()->get(state.symbols.create("function")); + if (instance.pre->useWasi) { + functionName = "_start"; + if (functionAttr) + throw Error("'function' attribute is not allowed for WASI modules"); + } else { + if (!functionAttr) + throw Error( + "missing required 'function' attribute in first argument to `builtins.wasm` for non-WASI modules"); + functionName = std::string( + state.forceStringNoCtx(*functionAttr->value, pos, "while evaluating the 'function' attribute")); + } + + debug("calling wasm module"); + + auto argId = instance.addValue(argValue); + + if (instance.pre->useWasi) { + WasiLogger logger{instance}; + + auto loggerTrampoline = [](void * data, const unsigned char * buf, size_t len) -> ptrdiff_t { + auto logger = static_cast(data); + (*logger)(std::string_view((const char *) buf, len)); + return len; + }; + + WasiConfig wasiConfig; + wasi_config_set_stdout_custom(wasiConfig.capi(), loggerTrampoline, &logger, nullptr); + wasi_config_set_stderr_custom(wasiConfig.capi(), loggerTrampoline, &logger, nullptr); + wasiConfig.argv({"wasi", std::to_string(argId)}); + unwrap(instance.wasmStore.context().set_wasi(std::move(wasiConfig))); + + auto res = instance.getExport(functionName).call(instance.wasmCtx, {}); + if (!instance.resultId) { + unwrap(std::move(res)); + throw Error( + "Wasm function '%s' from '%s' finished without returning a value", + functionName, + instance.pre->name); + } + + auto & vRes = instance.getValue(instance.resultId); + state.forceValue(vRes, pos); + v = vRes; + } else { + // FIXME: use the 
"start" function if present. + instance.runFunction("nix_wasm_init_v1", {}); + + auto res = instance.runFunction(functionName, {(int32_t) argId}); + if (res.size() != 1) + throw Error( + "Wasm function '%s' from '%s' did not return exactly one value", functionName, instance.pre->name); + if (res[0].kind() != ValKind::I32) + throw Error( + "Wasm function '%s' from '%s' did not return an i32 value", functionName, instance.pre->name); + auto & vRes = instance.getValue(res[0].i32()); + state.forceValue(vRes, pos); + v = vRes; + } + } catch (Error & e) { + e.addTrace(state.positions[pos], "while executing a Wasm module"); + throw; + } +} + +static RegisterPrimOp primop_wasm( + {.name = "__wasm", + .args = {"config", "arg"}, + .doc = R"( + Call a Wasm function with the specified argument. + + The first argument must be an attribute set with the following attributes: + - `path`: Path to the Wasm module (mutually exclusive with `wat`) + - `wat`: WebAssembly Text format source as a string (mutually exclusive with `path`) + - `function`: Function name to call (required for non-WASI modules, not allowed for WASI modules) + + Exactly one of `path` or `wat` must be specified. + + The second argument is the value to pass to the function. + + WASI mode is automatically enabled if the module imports from `wasi_snapshot_preview1`. 
+ + Example (non-WASI): + ```nix + builtins.wasm { + path = ./foo.wasm; + function = "fib"; + } 33 + ``` + + Example (reading from a WAT file): + ```nix + builtins.wasm { + wat = builtins.readFile ./fib.wat; + function = "fib"; + } 10 + ``` + + Example (WASI): + ```nix + builtins.wasm { + path = ./bar.wasm; + } { x = 42; } + ``` + )", + .impl = prim_wasm, + .experimentalFeature = Xp::WasmBuiltin}); + +} // namespace nix diff --git a/src/libexpr/print-ambiguous.cc b/src/libexpr/print-ambiguous.cc index ed91cad85a47..1912262ee576 100644 --- a/src/libexpr/print-ambiguous.cc +++ b/src/libexpr/print-ambiguous.cc @@ -20,9 +20,13 @@ void printAmbiguous(EvalState & state, Value & v, std::ostream & str, std::set"; + str << "«failed»"; break; case nFunction: if (v.isLambda()) { diff --git a/src/libexpr/print.cc b/src/libexpr/print.cc index f2f62a636982..57441fa379a7 100644 --- a/src/libexpr/print.cc +++ b/src/libexpr/print.cc @@ -249,7 +249,11 @@ class Printer void printString(Value & v) { - printLiteralString(output, v.string_view(), options.maxStringLength, options.ansiColors); + NixStringContext context; + copyContext(v, context); + std::ostringstream s; + printLiteralString(s, v.string_view(), options.maxStringLength, options.ansiColors); + output << state.devirtualize(s.str(), context); } void printPath(Value & v) @@ -498,7 +502,7 @@ class Printer output << "«potential infinite recursion»"; if (options.ansiColors) output << ANSI_NORMAL; - } else if (v.isThunk() || v.isApp()) { + } else if (!v.isFinished()) { if (options.ansiColors) output << ANSI_MAGENTA; output << "«thunk»"; @@ -515,7 +519,7 @@ class Printer output << ANSI_MAGENTA; // Historically, a tried and then ignored value (e.g. through tryEval) was // reverted to the original thunk. 
- output << "«thunk»"; + output << "«failed»"; if (options.ansiColors) output << ANSI_NORMAL; } diff --git a/src/libexpr/provenance.cc b/src/libexpr/provenance.cc new file mode 100644 index 000000000000..8bce4f120763 --- /dev/null +++ b/src/libexpr/provenance.cc @@ -0,0 +1,25 @@ +#include "nix/expr/provenance.hh" +#include "nix/util/json-utils.hh" + +#include + +namespace nix { + +nlohmann::json DerivationProvenance::to_json() const +{ + return nlohmann::json{ + {"type", "derivation"}, + {"meta", *meta}, + {"next", next ? next->to_json() : nlohmann::json(nullptr)}, + }; +} + +Provenance::Register registerDerivationProvenance("derivation", [](nlohmann::json json) { + auto & obj = getObject(json); + std::shared_ptr next; + if (auto p = optionalValueAt(obj, "next"); p && !p->is_null()) + next = Provenance::from_json(*p); + return make_ref(next, make_ref(valueAt(obj, "meta"))); +}); + +} // namespace nix diff --git a/src/libexpr/symbol-table.cc b/src/libexpr/symbol-table.cc new file mode 100644 index 000000000000..052c72570371 --- /dev/null +++ b/src/libexpr/symbol-table.cc @@ -0,0 +1,63 @@ +#include "nix/expr/symbol-table.hh" +#include "nix/util/logging.hh" + +#include + +namespace nix { + +#ifndef MAP_NORESERVE +# define MAP_NORESERVE 0 +#endif + +static void * allocateLazyMemory(size_t maxSize) +{ + auto p = mmap(nullptr, maxSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0); + if (p == MAP_FAILED) + throw SysError("allocating arena using mmap"); + return p; +} + +ContiguousArena::ContiguousArena(size_t maxSize) + : data((char *) allocateLazyMemory(maxSize)) + , maxSize(maxSize) +{ +} + +size_t ContiguousArena::allocate(size_t bytes) +{ + auto offset = size.fetch_add(bytes); + if (offset + bytes > maxSize) + throw Error("arena ran out of space"); + return offset; +} + +Symbol SymbolTable::create(std::string_view s) +{ + uint32_t idx; + + auto visit = [&](const SymbolStr & sym) { idx = ((const char *) sym.s) - arena.data; }; + + 
symbols.insert_and_visit(SymbolStr::Key{s, arena}, visit, visit); + + return Symbol(idx); +} + +SymbolStr::SymbolStr(const SymbolStr::Key & key) +{ + auto size = SymbolStr::computeSize(key.s); + + auto id = key.arena.allocate(size); + + auto v = (SymbolValue *) (const_cast(key.arena.data) + id); + + auto s = (StringData *) (v + 1); + s->size_ = key.s.size(); + std::memcpy(s->data_, key.s.data(), key.s.size()); + s->data_[key.s.size()] = '\0'; + + v->mkStringNoCopy(*s); + + this->s = v; +} + +} // namespace nix diff --git a/src/libexpr/value-to-json.cc b/src/libexpr/value-to-json.cc index 4fac29a6671d..58e3b3e53993 100644 --- a/src/libexpr/value-to-json.cc +++ b/src/libexpr/value-to-json.cc @@ -2,107 +2,148 @@ #include "nix/expr/eval-inline.hh" #include "nix/store/store-api.hh" #include "nix/util/signals.hh" +#include "nix/expr/parallel-eval.hh" #include #include #include namespace nix { + using json = nlohmann::json; +#pragma GCC diagnostic ignored "-Wswitch-enum" + +static void parallelForceDeep(EvalState & state, Value & v, PosIdx pos) +{ + state.forceValue(v, pos); + + Executor::WorkItems work; + + switch (v.type()) { + + case nAttrs: { + NixStringContext context; + if (state.tryAttrsToString(pos, v, context, false, false)) + return; + if (v.attrs()->get(state.s.outPath)) + return; + for (auto & a : *v.attrs()) + state.addWork(work, 0, [value(allocRootValue(a.value)), pos(a.pos), &state]() { + parallelForceDeep(state, **value, pos); + }); + break; + } + + default: + break; + } + + state.executor->spawn(std::move(work)); +} + // TODO: rename. It doesn't print. 
json printValueAsJSON( EvalState & state, bool strict, Value & v, const PosIdx pos, NixStringContext & context, bool copyToStore) { - checkInterrupt(); + if (strict && state.executor->enabled && !Executor::amWorkerThread) + parallelForceDeep(state, v, pos); - auto _level = state.addCallDepth(pos); + auto recurse = [&](this const auto & recurse, json & res, Value & v, PosIdx pos) -> void { + checkInterrupt(); - if (strict) - state.forceValue(v, pos); + auto _level = state.addCallDepth(pos); - json out; + if (strict) + state.forceValue(v, pos); - switch (v.type()) { + switch (v.type()) { - case nInt: - out = v.integer().value; - break; + case nInt: + res = v.integer().value; + break; - case nBool: - out = v.boolean(); - break; + case nBool: + res = v.boolean(); + break; - case nString: - copyContext(v, context); - out = v.string_view(); - break; + case nString: { + copyContext(v, context); + res = v.string_view(); + break; + } - case nPath: - if (copyToStore) - out = state.store->printStorePath(state.copyPathToStore(context, v.path())); - else - out = v.path().path.abs(); - break; + case nPath: + if (copyToStore) + res = state.store->printStorePath(state.copyPathToStore(context, v.path(), v.determinePos(pos))); + else + res = v.path().path.abs(); + break; - case nNull: - // already initialized as null - break; + case nNull: + // already initialized as null + break; - case nAttrs: { - auto maybeString = state.tryAttrsToString(pos, v, context, false, false); - if (maybeString) { - out = *maybeString; + case nAttrs: { + auto maybeString = state.tryAttrsToString(pos, v, context, false, false); + if (maybeString) { + res = *maybeString; + break; + } + if (auto i = v.attrs()->get(state.s.outPath)) + return recurse(res, *i->value, i->pos); + else { + res = json::object(); + for (auto & a : v.attrs()->lexicographicOrder(state.symbols)) { + json & j = res.emplace(state.symbols[a->name], json()).first.value(); + try { + recurse(j, *a->value, a->pos); + } catch (Error & e) { + 
e.addTrace( + state.positions[a->pos], + HintFmt("while evaluating attribute '%1%'", state.symbols[a->name])); + throw; + } + } + } break; } - if (auto i = v.attrs()->get(state.s.outPath)) - return printValueAsJSON(state, strict, *i->value, i->pos, context, copyToStore); - else { - out = json::object(); - for (auto & a : v.attrs()->lexicographicOrder(state.symbols)) { + + case nList: { + res = json::array(); + for (const auto & [i, elem] : enumerate(v.listView())) { try { - out.emplace( - state.symbols[a->name], - printValueAsJSON(state, strict, *a->value, a->pos, context, copyToStore)); + res.push_back(json()); + recurse(res.back(), *elem, pos); } catch (Error & e) { - e.addTrace( - state.positions[a->pos], HintFmt("while evaluating attribute '%1%'", state.symbols[a->name])); + e.addTrace(state.positions[pos], HintFmt("while evaluating list element at index %1%", i)); throw; } } + break; } - break; - } - case nList: { - out = json::array(); - int i = 0; - for (auto elem : v.listView()) { - try { - out.push_back(printValueAsJSON(state, strict, *elem, pos, context, copyToStore)); - } catch (Error & e) { - e.addTrace(state.positions[pos], HintFmt("while evaluating list element at index %1%", i)); - throw; - } - i++; + case nExternal: { + res = v.external()->printValueAsJSON(state, strict, context, copyToStore); + break; } - break; - } - case nExternal: - return v.external()->printValueAsJSON(state, strict, context, copyToStore); - break; + case nFloat: + res = v.fpoint(); + break; - case nFloat: - out = v.fpoint(); - break; + case nThunk: + case nFailed: + case nFunction: + state.error("cannot convert %1% to JSON", showType(v)).atPos(v.determinePos(pos)).debugThrow(); + } + }; - case nThunk: - case nFailed: - case nFunction: - state.error("cannot convert %1% to JSON", showType(v)).atPos(v.determinePos(pos)).debugThrow(); - } - return out; + json res; + + recurse(res, v, pos); + + return res; } void printValueAsJSON( diff --git a/src/libexpr/value/context.cc 
b/src/libexpr/value/context.cc index 4a17bbdc3a83..3bdc73f94aee 100644 --- a/src/libexpr/value/context.cc +++ b/src/libexpr/value/context.cc @@ -51,6 +51,11 @@ NixStringContextElem NixStringContextElem::parse(std::string_view s0, const Expe .drvPath = StorePath{s.substr(1)}, }; } + case '@': { + return NixStringContextElem::Path{ + .storePath = StorePath{s.substr(1)}, + }; + } default: { // Ensure no '!' if (s.find("!") != std::string_view::npos) { @@ -91,6 +96,10 @@ std::string NixStringContextElem::to_string() const res += '='; res += d.drvPath.to_string(); }, + [&](const NixStringContextElem::Path & p) { + res += '@'; + res += p.storePath.to_string(); + }, }, raw); @@ -108,6 +117,9 @@ std::string NixStringContextElem::display(const StoreDirConfig & store) const return store.printStorePath(d.drvPath) + " (deep)"; }, [&](const NixStringContextElem::Built & b) -> std::string { return SingleDerivedPath{b}.to_string(store); }, + [&](const NixStringContextElem::Path & p) -> std::string { + return store.printStorePath(p.storePath) + " (untracked)"; + }, }, raw); } diff --git a/src/libfetchers-c/meson.build b/src/libfetchers-c/meson.build index db415d9173e7..c029eb0d1e59 100644 --- a/src/libfetchers-c/meson.build +++ b/src/libfetchers-c/meson.build @@ -57,7 +57,7 @@ this_library = library( dependencies : deps_public + deps_private + deps_other, include_directories : include_dirs, link_args : linker_export_flags, - prelink : true, # For C++ static initializers + prelink : prelink, # For C++ static initializers install : true, ) diff --git a/src/libfetchers-c/package.nix b/src/libfetchers-c/package.nix index 9a601d70417c..13ec30d566eb 100644 --- a/src/libfetchers-c/package.nix +++ b/src/libfetchers-c/package.nix @@ -17,7 +17,7 @@ let in mkMesonLibrary (finalAttrs: { - pname = "nix-fetchers-c"; + pname = "determinate-nix-fetchers-c"; inherit version; workDir = ./.; diff --git a/src/libfetchers-tests/access-tokens.cc b/src/libfetchers-tests/access-tokens.cc index 
7127434db9df..26cdcfb83fc9 100644 --- a/src/libfetchers-tests/access-tokens.cc +++ b/src/libfetchers-tests/access-tokens.cc @@ -15,10 +15,7 @@ class AccessKeysTest : public ::testing::Test protected: public: - void SetUp() override - { - experimentalFeatureSettings.experimentalFeatures.get().insert(Xp::Flakes); - } + void SetUp() override {} void TearDown() override {} }; diff --git a/src/libfetchers/attrs.cc b/src/libfetchers/attrs.cc index 841808bd16a9..648d48545431 100644 --- a/src/libfetchers/attrs.cc +++ b/src/libfetchers/attrs.cc @@ -27,6 +27,9 @@ nlohmann::json attrsToJSON(const Attrs & attrs) { nlohmann::json json; for (auto & attr : attrs) { + /* The __final attribute is purely internal, so never serialize it. */ + if (attr.first == "__final") + continue; if (auto v = std::get_if(&attr.second)) { json[attr.first] = *v; } else if (auto v = std::get_if(&attr.second)) { diff --git a/src/libfetchers/builtin-flake-registry.json b/src/libfetchers/builtin-flake-registry.json new file mode 100644 index 000000000000..65e973290a05 --- /dev/null +++ b/src/libfetchers/builtin-flake-registry.json @@ -0,0 +1,425 @@ +{ + "flakes": [ + { + "from": { + "id": "agda", + "type": "indirect" + }, + "to": { + "owner": "agda", + "repo": "agda", + "type": "github" + } + }, + { + "from": { + "id": "agenix", + "type": "indirect" + }, + "to": { + "owner": "ryantm", + "repo": "agenix", + "type": "github" + } + }, + { + "from": { + "id": "arion", + "type": "indirect" + }, + "to": { + "owner": "hercules-ci", + "repo": "arion", + "type": "github" + } + }, + { + "from": { + "id": "blender-bin", + "type": "indirect" + }, + "to": { + "dir": "blender", + "owner": "edolstra", + "repo": "nix-warez", + "type": "github" + } + }, + { + "from": { + "id": "bundlers", + "type": "indirect" + }, + "to": { + "owner": "NixOS", + "repo": "bundlers", + "type": "github" + } + }, + { + "from": { + "id": "cachix", + "type": "indirect" + }, + "to": { + "owner": "cachix", + "repo": "cachix", + "type": "github" 
+ } + }, + { + "from": { + "id": "composable", + "type": "indirect" + }, + "to": { + "owner": "ComposableFi", + "repo": "composable", + "type": "github" + } + }, + { + "from": { + "id": "disko", + "type": "indirect" + }, + "to": { + "owner": "nix-community", + "repo": "disko", + "type": "github" + } + }, + { + "from": { + "id": "dreampkgs", + "type": "indirect" + }, + "to": { + "owner": "nix-community", + "repo": "dreampkgs", + "type": "github" + } + }, + { + "from": { + "id": "dwarffs", + "type": "indirect" + }, + "to": { + "owner": "edolstra", + "repo": "dwarffs", + "type": "github" + } + }, + { + "from": { + "id": "emacs-overlay", + "type": "indirect" + }, + "to": { + "owner": "nix-community", + "repo": "emacs-overlay", + "type": "github" + } + }, + { + "from": { + "id": "fenix", + "type": "indirect" + }, + "to": { + "owner": "nix-community", + "repo": "fenix", + "type": "github" + } + }, + { + "from": { + "id": "flake-parts", + "type": "indirect" + }, + "to": { + "owner": "hercules-ci", + "repo": "flake-parts", + "type": "github" + } + }, + { + "from": { + "id": "flake-utils", + "type": "indirect" + }, + "to": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + }, + { + "from": { + "id": "helix", + "type": "indirect" + }, + "to": { + "owner": "helix-editor", + "repo": "helix", + "type": "github" + } + }, + { + "from": { + "id": "hercules-ci-agent", + "type": "indirect" + }, + "to": { + "owner": "hercules-ci", + "repo": "hercules-ci-agent", + "type": "github" + } + }, + { + "from": { + "id": "hercules-ci-effects", + "type": "indirect" + }, + "to": { + "owner": "hercules-ci", + "repo": "hercules-ci-effects", + "type": "github" + } + }, + { + "from": { + "id": "home-manager", + "type": "indirect" + }, + "to": { + "owner": "nix-community", + "repo": "home-manager", + "type": "github" + } + }, + { + "from": { + "id": "hydra", + "type": "indirect" + }, + "to": { + "owner": "NixOS", + "repo": "hydra", + "type": "github" + } + }, + { + "from": { + 
"id": "mach-nix", + "type": "indirect" + }, + "to": { + "owner": "DavHau", + "repo": "mach-nix", + "type": "github" + } + }, + { + "from": { + "id": "ngipkgs", + "type": "indirect" + }, + "to": { + "owner": "ngi-nix", + "repo": "ngipkgs", + "type": "github" + } + }, + { + "from": { + "id": "nickel", + "type": "indirect" + }, + "to": { + "owner": "tweag", + "repo": "nickel", + "type": "github" + } + }, + { + "from": { + "id": "nix", + "type": "indirect" + }, + "to": { + "owner": "NixOS", + "repo": "nix", + "type": "github" + } + }, + { + "from": { + "id": "nix-darwin", + "type": "indirect" + }, + "to": { + "owner": "nix-darwin", + "repo": "nix-darwin", + "type": "github" + } + }, + { + "from": { + "id": "nix-serve", + "type": "indirect" + }, + "to": { + "owner": "edolstra", + "repo": "nix-serve", + "type": "github" + } + }, + { + "from": { + "id": "nixops", + "type": "indirect" + }, + "to": { + "owner": "NixOS", + "repo": "nixops", + "type": "github" + } + }, + { + "from": { + "id": "nixos-anywhere", + "type": "indirect" + }, + "to": { + "owner": "nix-community", + "repo": "nixos-anywhere", + "type": "github" + } + }, + { + "from": { + "id": "nixos-hardware", + "type": "indirect" + }, + "to": { + "owner": "NixOS", + "repo": "nixos-hardware", + "type": "github" + } + }, + { + "from": { + "id": "nixos-homepage", + "type": "indirect" + }, + "to": { + "owner": "NixOS", + "repo": "nixos-homepage", + "type": "github" + } + }, + { + "from": { + "id": "nixos-search", + "type": "indirect" + }, + "to": { + "owner": "NixOS", + "repo": "nixos-search", + "type": "github" + } + }, + { + "from": { + "id": "nixpkgs", + "type": "indirect" + }, + "to": { + "owner": "NixOS", + "ref": "nixpkgs-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + { + "from": { + "id": "nur", + "type": "indirect" + }, + "to": { + "owner": "nix-community", + "repo": "NUR", + "type": "github" + } + }, + { + "from": { + "id": "patchelf", + "type": "indirect" + }, + "to": { + "owner": "NixOS", + 
"repo": "patchelf", + "type": "github" + } + }, + { + "from": { + "id": "poetry2nix", + "type": "indirect" + }, + "to": { + "owner": "nix-community", + "repo": "poetry2nix", + "type": "github" + } + }, + { + "from": { + "id": "pridefetch", + "type": "indirect" + }, + "to": { + "owner": "SpyHoodle", + "repo": "pridefetch", + "type": "github" + } + }, + { + "from": { + "id": "sops-nix", + "type": "indirect" + }, + "to": { + "owner": "Mic92", + "repo": "sops-nix", + "type": "github" + } + }, + { + "from": { + "id": "systems", + "type": "indirect" + }, + "to": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } + }, + { + "from": { + "id": "templates", + "type": "indirect" + }, + "to": { + "owner": "NixOS", + "repo": "templates", + "type": "github" + } + } + ], + "version": 2 +} diff --git a/src/libfetchers/builtin.cc b/src/libfetchers/builtin.cc new file mode 100644 index 000000000000..44b3baf0b1aa --- /dev/null +++ b/src/libfetchers/builtin.cc @@ -0,0 +1,60 @@ +#include "nix/store/builtins.hh" +#include "nix/store/parsed-derivations.hh" +#include "nix/fetchers/fetchers.hh" +#include "nix/fetchers/fetch-settings.hh" +#include "nix/util/archive.hh" +#include "nix/store/filetransfer.hh" +#include "nix/store/store-open.hh" + +#include + +namespace nix { + +static void builtinFetchTree(const BuiltinBuilderContext & ctx) +{ + experimentalFeatureSettings.require(Xp::BuildTimeFetchTree); + + auto out = get(ctx.drv.outputs, "out"); + if (!out) + throw Error("'builtin:fetch-tree' requires an 'out' output"); + + if (!(ctx.drv.type().isFixed() || ctx.drv.type().isImpure())) + throw Error("'builtin:fetch-tree' must be a fixed-output or impure derivation"); + + if (!ctx.drv.structuredAttrs) + throw Error("'builtin:fetch-tree' must have '__structuredAttrs = true'"); + + setenv("NIX_CACHE_HOME", ctx.tmpDirInSandbox.c_str(), 1); + + using namespace fetchers; + + fetchers::Settings myFetchSettings; + myFetchSettings.accessTokens = fetchSettings.accessTokens.get(); 
+ + // Make sure we don't use the FileTransfer object of the parent + // since it's in a broken state after the fork. We also must not + // delete it, so hang on to the shared_ptr. + // FIXME: move FileTransfer into fetchers::Settings. + static auto prevFileTransfer = resetFileTransfer(); + + // FIXME: disable use of the git/tarball cache + + auto input = Input::fromAttrs(myFetchSettings, jsonToAttrs(ctx.drv.structuredAttrs->structuredAttrs.at("input"))); + + std::cerr << fmt("fetching '%s'...\n", input.to_string()); + + /* Functions like downloadFile() expect a store. We can't use the + real one since we're in a forked process. FIXME: use recursive + Nix's daemon so we can use the real store? */ + auto tmpStore = openStore(ctx.tmpDirInSandbox + "/nix"); + + auto [accessor, lockedInput] = input.getAccessor(myFetchSettings, *tmpStore); + + auto source = sinkToSource([&](Sink & sink) { accessor->dumpPath(CanonPath::root, sink); }); + + restorePath(ctx.outputs.at("out"), *source); +} + +static RegisterBuiltinBuilder registerUnpackChannel("fetch-tree", builtinFetchTree); + +} // namespace nix diff --git a/src/libfetchers/cache.cc b/src/libfetchers/cache.cc index cf60e29a8e15..09947e7f8f0f 100644 --- a/src/libfetchers/cache.cc +++ b/src/libfetchers/cache.cc @@ -115,7 +115,7 @@ struct CacheImpl : Cache upsert(key, value); } - std::optional lookupStorePath(Key key, Store & store) override + std::optional lookupStorePath(Key key, Store & store, bool allowInvalid) override { key.second.insert_or_assign("store", store.storeDir); @@ -129,7 +129,7 @@ struct CacheImpl : Cache ResultWithStorePath res2(*res, StorePath(storePathS)); store.addTempRoot(res2.storePath); - if (!store.isValidPath(res2.storePath)) { + if (!allowInvalid && !store.isValidPath(res2.storePath)) { // FIXME: we could try to substitute 'storePath'. 
debug( "ignoring disappeared cache entry '%s:%s' -> '%s'", @@ -151,7 +151,7 @@ struct CacheImpl : Cache std::optional lookupStorePathWithTTL(Key key, Store & store) override { - auto res = lookupStorePath(std::move(key), store); + auto res = lookupStorePath(std::move(key), store, false); return res && !res->expired ? res : std::nullopt; } }; diff --git a/src/libfetchers/fetch-settings.cc b/src/libfetchers/fetch-settings.cc index f92b94a0b3bd..f50177f094e3 100644 --- a/src/libfetchers/fetch-settings.cc +++ b/src/libfetchers/fetch-settings.cc @@ -1,7 +1,16 @@ #include "nix/fetchers/fetch-settings.hh" +#include "nix/util/config-global.hh" namespace nix::fetchers { Settings::Settings() {} } // namespace nix::fetchers + +namespace nix { + +fetchers::Settings fetchSettings; + +static GlobalConfig::Register rFetchSettings(&fetchSettings); + +} // namespace nix diff --git a/src/libfetchers/fetch-to-store.cc b/src/libfetchers/fetch-to-store.cc index 3af2d4c83e88..3e932454dcd2 100644 --- a/src/libfetchers/fetch-to-store.cc +++ b/src/libfetchers/fetch-to-store.cc @@ -53,7 +53,7 @@ std::pair fetchToStore2( if (mode != FetchMode::DryRun) store.addTempRoot(storePath); - if (mode == FetchMode::DryRun || store.isValidPath(storePath)) { + if (mode == FetchMode::DryRun || store.maybeQueryPathInfo(storePath)) { debug( "source path '%s' cache hit in '%s' (hash '%s')", path, @@ -65,7 +65,7 @@ std::pair fetchToStore2( } } else { static auto barf = getEnv("_NIX_TEST_BARF_ON_UNCACHEABLE").value_or("") == "1"; - if (barf && !filter) + if (barf && !filter && !(path.to_string().starts_with("/") || path.to_string().starts_with("«path:/"))) throw Error("source path '%s' is uncacheable (filter=%d)", path, (bool) filter); // FIXME: could still provide in-memory caching keyed on `SourcePath`. 
debug("source path '%s' is uncacheable", path); diff --git a/src/libfetchers/fetchers.cc b/src/libfetchers/fetchers.cc index 575e479d49e3..ae16dfc0e7d7 100644 --- a/src/libfetchers/fetchers.cc +++ b/src/libfetchers/fetchers.cc @@ -4,11 +4,16 @@ #include "nix/fetchers/fetch-to-store.hh" #include "nix/util/json-utils.hh" #include "nix/fetchers/fetch-settings.hh" -#include "nix/fetchers/fetch-to-store.hh" +#include "nix/fetchers/provenance.hh" #include "nix/util/url.hh" +#include "nix/util/forwarding-source-accessor.hh" #include "nix/util/archive.hh" +#include "nix/util/users.hh" +#include "nix/store/pathlocks.hh" +#include "nix/util/environment-variables.hh" #include +#include namespace nix::fetchers { @@ -126,24 +131,30 @@ std::optional Input::getFingerprint(Store & store) const return fingerprint; } -ParsedURL Input::toURL() const +ParsedURL Input::toURL(bool abbreviate) const { if (!scheme) throw Error("cannot show unsupported input '%s'", attrsToJSON(attrs)); - return scheme->toURL(*this); + + auto url = scheme->toURL(*this, abbreviate); + + if (abbreviate) + url.query.erase("narHash"); + + return url; } -std::string Input::toURLString(const StringMap & extraQuery) const +std::string Input::toURLString(const StringMap & extraQuery, bool abbreviate) const { - auto url = toURL(); + auto url = toURL(abbreviate); for (auto & attr : extraQuery) url.query.insert(attr); return url.to_string(); } -std::string Input::to_string() const +std::string Input::to_string(bool abbreviate) const { - return toURL().to_string(); + return toURL(abbreviate).to_string(); } bool Input::isDirect() const @@ -189,36 +200,30 @@ bool Input::contains(const Input & other) const return false; } -// FIXME: remove -std::pair Input::fetchToStore(const Settings & settings, Store & store) const +std::tuple, Input> Input::fetchToStore(const Settings & settings, Store & store) const { if (!scheme) throw Error("cannot fetch unsupported input '%s'", attrsToJSON(toAttrs())); - auto [storePath, input] = 
[&]() -> std::pair { - try { - auto [accessor, result] = getAccessorUnchecked(settings, store); - - auto storePath = - nix::fetchToStore(settings, store, SourcePath(accessor), FetchMode::Copy, result.getName()); + try { + auto [accessor, result] = getAccessorUnchecked(settings, store); - auto narHash = store.queryPathInfo(storePath)->narHash; - result.attrs.insert_or_assign("narHash", narHash.to_string(HashFormat::SRI, true)); + auto storePath = nix::fetchToStore(settings, store, SourcePath(accessor), FetchMode::Copy, result.getName()); - result.attrs.insert_or_assign("__final", Explicit(true)); + auto narHash = store.queryPathInfo(storePath)->narHash; + result.attrs.insert_or_assign("narHash", narHash.to_string(HashFormat::SRI, true)); - assert(result.isFinal()); + result.attrs.insert_or_assign("__final", Explicit(true)); - checkLocks(*this, result); + assert(result.isFinal()); - return {storePath, result}; - } catch (Error & e) { - e.addTrace({}, "while fetching the input '%s'", to_string()); - throw; - } - }(); + checkLocks(*this, result); - return {std::move(storePath), input}; + return {std::move(storePath), accessor, result}; + } catch (Error & e) { + e.addTrace({}, "while fetching the input '%s'", to_string()); + throw; + } } void Input::checkLocks(Input specified, Input & result) @@ -272,24 +277,10 @@ void Input::checkLocks(Input specified, Input & result) } } - if (auto prevLastModified = specified.getLastModified()) { - if (result.getLastModified() != prevLastModified) - throw Error( - "'lastModified' attribute mismatch in input '%s', expected %d, got %d", - result.to_string(), - *prevLastModified, - result.getLastModified().value_or(-1)); - } - if (auto prevRev = specified.getRev()) { if (result.getRev() != prevRev) throw Error("'rev' attribute mismatch in input '%s', expected %s", result.to_string(), prevRev->gitRev()); } - - if (auto prevRevCount = specified.getRevCount()) { - if (result.getRevCount() != prevRevCount) - throw Error("'revCount' 
attribute mismatch in input '%s', expected %d", result.to_string(), *prevRevCount); - } } std::pair, Input> Input::getAccessor(const Settings & settings, Store & store) const @@ -308,6 +299,21 @@ std::pair, Input> Input::getAccessor(const Settings & settin } } +/** + * Helper class that ensures that paths in substituted source trees + * are rendered as `«input»/path` rather than + * `«input»/nix/store/-source/path`. + */ +struct SubstitutedSourceAccessor : ForwardingSourceAccessor +{ + using ForwardingSourceAccessor::ForwardingSourceAccessor; + + std::string showPath(const CanonPath & path) override + { + return displayPrefix + path.abs() + displaySuffix; + } +}; + std::pair, Input> Input::getAccessorUnchecked(const Settings & settings, Store & store) const { // FIXME: cache the accessor @@ -315,55 +321,92 @@ std::pair, Input> Input::getAccessorUnchecked(const Settings if (!scheme) throw Error("cannot fetch unsupported input '%s'", attrsToJSON(toAttrs())); - /* The tree may already be in the Nix store, or it could be - substituted (which is often faster than fetching from the - original source). So check that. We only do this for final - inputs, otherwise there is a risk that we don't return the - same attributes (like `lastModified`) that the "real" fetcher - would return. - - FIXME: add a setting to disable this. - FIXME: substituting may be slower than fetching normally, - e.g. for fetchers like Git that are incremental! - */ - if (isFinal() && getNarHash()) { - try { - auto storePath = computeStorePath(store); + std::optional storePath; + if (isFinal() && getNarHash()) + storePath = computeStorePath(store); + + auto makeStoreAccessor = [&]() -> std::pair, Input> { + auto accessor = make_ref(store.requireStoreObjectAccessor(*storePath)); + + // FIXME: use the NAR hash for fingerprinting Git trees since it may have a .gitattributes file and we don't + // know if we used `git archive` or libgit2 to fetch it. + accessor->fingerprint = getType() == "git" ? 
std::optional(storePath->hashPart()) : getFingerprint(store); + cachedFingerprint = accessor->fingerprint; + + // Store a cache entry for the substituted tree so later fetches + // can reuse the existing nar instead of copying the unpacked + // input back into the store on every evaluation. + if (accessor->fingerprint) { + settings.getCache()->upsert( + makeSourcePathToHashCacheKey( + *accessor->fingerprint, ContentAddressMethod::Raw::NixArchive, CanonPath::root), + {{"hash", store.queryPathInfo(*storePath)->narHash.to_string(HashFormat::SRI, true)}}); + } - store.ensurePath(storePath); + accessor->provenance = std::make_shared(*this); - debug("using substituted/cached input '%s' in '%s'", to_string(), store.printStorePath(storePath)); + // FIXME: ideally we would use the `showPath()` of the + // "real" accessor for this fetcher type. + accessor->setPathDisplay("«" + to_string(true) + "»"); - auto accessor = store.requireStoreObjectAccessor(storePath); + return {accessor, *this}; + }; - accessor->fingerprint = getFingerprint(store); + /* If a tree with the expected hash is already in the Nix store, + reuse it. We only do this for final inputs, since otherwise + there is a risk that we don't return the same attributes (like + `lastModified`) that the "real" fetcher would return. */ + if (storePath && store.isValidPath(*storePath)) { + debug("using input '%s' in '%s'", to_string(), store.printStorePath(*storePath)); + return makeStoreAccessor(); + } - // Store a cache entry for the substituted tree so later fetches - // can reuse the existing nar instead of copying the unpacked - // input back into the store on every evaluation. 
- if (accessor->fingerprint) { - settings.getCache()->upsert( - makeSourcePathToHashCacheKey( - *accessor->fingerprint, ContentAddressMethod::Raw::NixArchive, CanonPath::root), - {{"hash", store.queryPathInfo(storePath)->narHash.to_string(HashFormat::SRI, true)}}); - } + auto fixupAccessor = [&](ref accessor, Input result) -> std::pair, Input> { + if (auto fp = accessor->getFingerprint(CanonPath::root).second) + result.cachedFingerprint = *fp; + else + accessor->fingerprint = result.getFingerprint(store); - accessor->setPathDisplay("«" + to_string() + "»"); + accessor->provenance = std::make_shared(result); + + return {accessor, result}; + }; - return {accessor, *this}; - } catch (Error & e) { - debug("substitution of input '%s' failed: %s", to_string(), e.what()); + /* Acquire a path lock on this input. Note that fetching the same input in parallel is supposed to be safe (it's up + * to the fetchers to guarantee this), so this is merely intended to avoid work duplication. */ + auto lockFilePath = + getCacheDir() / "fetcher-locks" + / hashString(HashAlgorithm::SHA256, attrsToJSON(toAttrs()).dump()).to_string(HashFormat::Base16, false); + std::filesystem::create_directories(lockFilePath.parent_path()); + PathLocks lock( + {lockFilePath.string()}, fmt("waiting for another Nix process to finish fetching input '%s'...", to_string())); + + if (getEnv("_NIX_TEST_CONCURRENT_FETCHES")) + std::this_thread::sleep_for(std::chrono::seconds(1)); + + /* See if the input is in the cache of the fetcher. */ + try { + if (auto res = scheme->getAccessor(settings, store, *this, true)) + return fixupAccessor(res->first, std::move(res->second)); + } catch (...) { + } + + /* If not, try to substitute the input. */ + if (storePath) { + try { + store.ensurePath(*storePath); + return makeStoreAccessor(); + } + // Ignore any substitution error. + catch (Error & e2) { + debug("substitution of input '%s' failed: %s", to_string(), e2.info().msg); + } catch (...) 
{ } } + /* If we can't substitute, then fetch normally. */ auto [accessor, result] = scheme->getAccessor(settings, store, *this); - - if (!accessor->fingerprint) - accessor->fingerprint = result.getFingerprint(store); - else - result.cachedFingerprint = accessor->fingerprint; - - return {accessor, std::move(result)}; + return fixupAccessor(accessor, result); } Input Input::applyOverrides(std::optional ref, std::optional rev) const @@ -464,7 +507,7 @@ std::optional Input::getLastModified() const return {}; } -ParsedURL InputScheme::toURL(const Input & input) const +ParsedURL InputScheme::toURL(const Input & input, bool abbreviate) const { throw Error("don't know how to convert input '%s' to a URL", attrsToJSON(input.attrs)); } diff --git a/src/libfetchers/filtering-source-accessor.cc b/src/libfetchers/filtering-source-accessor.cc index 0daedcf7c78f..a42b5822245e 100644 --- a/src/libfetchers/filtering-source-accessor.cc +++ b/src/libfetchers/filtering-source-accessor.cc @@ -1,4 +1,5 @@ #include "nix/fetchers/filtering-source-accessor.hh" +#include "nix/util/sync.hh" #include @@ -61,6 +62,13 @@ std::pair> FilteringSourceAccessor::getFin return next->getFingerprint(prefix / path); } +std::shared_ptr FilteringSourceAccessor::getProvenance(const CanonPath & path) +{ + if (provenance) + return SourceAccessor::getProvenance(path); + return next->getProvenance(prefix / path); +} + void FilteringSourceAccessor::invalidateCache(const CanonPath & path) { next->invalidateCache(prefix / path); @@ -74,8 +82,8 @@ void FilteringSourceAccessor::checkAccess(const CanonPath & path) struct AllowListSourceAccessorImpl : AllowListSourceAccessor { - std::set allowedPrefixes; - boost::unordered_flat_set allowedPaths; + SharedSync> allowedPrefixes; + SharedSync> allowedPaths; AllowListSourceAccessorImpl( ref next, @@ -90,12 +98,12 @@ struct AllowListSourceAccessorImpl : AllowListSourceAccessor bool isAllowed(const CanonPath & path) override { - return allowedPaths.contains(path) || 
path.isAllowed(allowedPrefixes); + return allowedPaths.readLock()->contains(path) || path.isAllowed(*allowedPrefixes.readLock()); } void allowPrefix(CanonPath prefix) override { - allowedPrefixes.insert(std::move(prefix)); + allowedPrefixes.lock()->insert(std::move(prefix)); } }; diff --git a/src/libfetchers/git-utils.cc b/src/libfetchers/git-utils.cc index 456d869983c2..1911ebdd9dc5 100644 --- a/src/libfetchers/git-utils.cc +++ b/src/libfetchers/git-utils.cc @@ -13,6 +13,7 @@ #include "nix/util/util.hh" #include "nix/util/thread-pool.hh" #include "nix/util/pool.hh" +#include "nix/util/executable-path.hh" #include #include @@ -427,7 +428,7 @@ struct GitRepoImpl : GitRepo, std::enable_shared_from_this ThreadPool pool; - auto process = [&done, &pool, &repoPool](this const auto & process, const git_oid & oid) -> void { + auto process = [&done, &pool, &repoPool](this auto const & process, const git_oid & oid) -> void { auto repo(repoPool.get()); auto _commit = lookupObject(*repo, oid, GIT_OBJECT_COMMIT); @@ -637,28 +638,46 @@ struct GitRepoImpl : GitRepo, std::enable_shared_from_this // that) // then use code that was removed in this commit (see blame) - auto dir = this->path; - OsStrings gitArgs = { - OS_STR("-C"), - dir.native(), - OS_STR("--git-dir"), - OS_STR("."), - OS_STR("fetch"), - OS_STR("--progress"), - OS_STR("--force"), - }; - if (shallow) { - gitArgs.push_back(OS_STR("--depth")); - gitArgs.push_back(OS_STR("1")); - } - gitArgs.push_back(OS_STR("--")); - gitArgs.push_back(string_to_os_string(url)); - gitArgs.push_back(string_to_os_string(refspec)); + if (ExecutablePath::load().findName("git")) { + auto dir = this->path; + + // Remove shallow.lock left behind by a previously interrupted `git fetch`, as it would prevent `git fetch` + // from running. Note that we already have a repository-wide `PathLock` (see git.cc), so this is safe. 
+ std::filesystem::remove(dir / "shallow.lock"); + + OsStrings gitArgs{"-C", dir.native(), "--git-dir", ".", "fetch", "--progress", "--force"}; + if (shallow) { + gitArgs.push_back(OS_STR("--depth")); + gitArgs.push_back(OS_STR("1")); + } + gitArgs.push_back(OS_STR("--")); + gitArgs.push_back(string_to_os_string(url)); + gitArgs.push_back(string_to_os_string(refspec)); + + auto status = runProgram(RunOptions{.program = "git", .args = gitArgs, .isInteractive = true}).first; + + if (status > 0) + throw Error("Failed to fetch git repository '%s'", url); + } else { + // Fall back to using libgit2 for fetching. This does not + // support SSH very well. + Remote remote; - auto status = runProgram({.program = "git", .args = gitArgs, .isInteractive = true}).first; + if (git_remote_create_anonymous(Setter(remote), *this, url.c_str())) + throw Error("cannot create Git remote '%s': %s", url, git_error_last()->message); - if (status > 0) - throw Error("Failed to fetch git repository '%s'", url); + char * refspecs[] = {(char *) refspec.c_str()}; + git_strarray refspecs2{.strings = refspecs, .count = 1}; + + git_fetch_options opts = GIT_FETCH_OPTIONS_INIT; + // FIXME: for some reason, shallow fetching over ssh barfs + // with "could not read from remote repository". + opts.depth = shallow && parseURL(url).scheme != "ssh" ? 1 : GIT_FETCH_DEPTH_FULL; + opts.callbacks.payload = &act; + + if (git_remote_fetch(remote.get(), &refspecs2, &opts, nullptr)) + throw Error("fetching '%s' from '%s': %s", refspec, url, git_error_last()->message); + } } void verifyCommit(const Hash & rev, const std::vector & publicKeys) override @@ -774,10 +793,14 @@ ref GitRepo::openRepo(const std::filesystem::path & path, GitRepo::Opti return make_ref(path, options); } +std::string GitAccessorOptions::makeFingerprint(const Hash & rev) const +{ + return "git:" + rev.gitRev() + (exportIgnore ? ";e" : "") + (smudgeLfs ? ";l" : ""); +} + /** * Raw git tree input accessor. 
*/ - struct GitSourceAccessor : SourceAccessor { struct State @@ -798,6 +821,7 @@ struct GitSourceAccessor : SourceAccessor .options = options, }} { + fingerprint = options.makeFingerprint(rev); } void readBlob(const CanonPath & path, bool symlink, Sink & sink, std::function sizeCallback) @@ -1493,7 +1517,10 @@ ref Settings::getTarballCache() const * for optimal packfiles. */ static auto repoDir = std::filesystem::path(getCacheDir()) / "tarball-cache-v2"; - return GitRepo::openRepo(repoDir, {.create = true, .bare = true, .packfilesOnly = true}); + auto tarballCache(_tarballCache.lock()); + if (!*tarballCache) + *tarballCache = GitRepo::openRepo(repoDir, {.create = true, .bare = true, .packfilesOnly = true}); + return ref(*tarballCache); } } // namespace fetchers diff --git a/src/libfetchers/git.cc b/src/libfetchers/git.cc index f6ae63c815ee..fadc68400166 100644 --- a/src/libfetchers/git.cc +++ b/src/libfetchers/git.cc @@ -17,6 +17,7 @@ #include "nix/util/json-utils.hh" #include "nix/util/archive.hh" #include "nix/util/mounted-source-accessor.hh" +#include "nix/fetchers/fetch-to-store.hh" #include #include @@ -32,12 +33,6 @@ namespace nix::fetchers { namespace { -// Explicit initial branch of our bare repo to suppress warnings from new version of git. -// The value itself does not matter, since we always fetch a specific revision or branch. -// It is set with `-c init.defaultBranch=` instead of `--initial-branch=` to stay compatible with -// old version of git, which will ignore unrecognized `-c` options. 
-const std::string gitInitialBranch = "__nix_dummy_branch"; - static bool isCacheFileWithinTtl(const Settings & settings, time_t now, const PosixStat & st) { return st.st_mtime + static_cast(settings.tarballTtl) > now; @@ -131,7 +126,7 @@ static std::optional readHeadCached(const Settings & settings, cons std::optional cachedRef; if (st) { cachedRef = readHead(cacheDir); - if (cachedRef != std::nullopt && *cachedRef != gitInitialBranch && isCacheFileWithinTtl(settings, now, *st)) { + if (cachedRef != std::nullopt && isCacheFileWithinTtl(settings, now, *st)) { debug("using cached HEAD ref '%s' for repo '%s'", *cachedRef, actualUrl); return cachedRef; } @@ -407,15 +402,17 @@ struct GitInputScheme : InputScheme return input; } - ParsedURL toURL(const Input & input) const override + ParsedURL toURL(const Input & input, bool abbreviate) const override { auto url = parseURL(getStrAttr(input.attrs, "url")); if (url.scheme != "git") url.scheme = "git+" + url.scheme; if (auto rev = input.getRev()) url.query.insert_or_assign("rev", rev->gitRev()); - if (auto ref = input.getRef()) - url.query.insert_or_assign("ref", *ref); + if (auto ref = input.getRef()) { + if (!abbreviate || (*ref != "master" && *ref != "main")) + url.query.insert_or_assign("ref", *ref); + } if (getShallowAttr(input)) url.query.insert_or_assign("shallow", "1"); if (getLfsAttr(input)) @@ -577,10 +574,10 @@ struct GitInputScheme : InputScheme { if (workdirInfo.isDirty) { if (!settings.allowDirty) - throw Error("Git tree '%s' is dirty", locationToArg()); + throw Error("Git tree '%s' has uncommitted changes", locationToArg()); if (settings.warnDirty) - warn("Git tree '%s' is dirty", locationToArg()); + warn("Git tree '%s' has uncommitted changes", locationToArg()); } } @@ -797,15 +794,123 @@ struct GitInputScheme : InputScheme } } - std::pair, Input> - getAccessorFromCommit(const Settings & settings, Store & store, RepoInfo & repoInfo, Input && input) const + /** + * Decide whether we can do a shallow clone, 
which is faster. This is possible if the user explicitly specified + * `shallow = true`, or if we already have a `revCount`. + */ + bool canDoShallow(const Input & input) const + { + bool shallow = getShallowAttr(input); + return shallow || input.getRevCount().has_value(); + } + + GitAccessorOptions getGitAccessorOptions(const Input & input) const + { + return GitAccessorOptions{ + .exportIgnore = getExportIgnoreAttr(input), + .smudgeLfs = getLfsAttr(input), + .submodules = getSubmodulesAttr(input), + }; + } + + /** + * Get a `SourceAccessor` for the given Git revision using Nix < 2.20 semantics, i.e. using `git archive` or `git + * checkout`. + */ + ref getLegacyGitAccessor( + const Settings & settings, + Store & store, + RepoInfo & repoInfo, + const std::filesystem::path & repoDir, + const Hash & rev, + GitAccessorOptions & options) const + { + if (!options.submodules) + options.exportIgnore = true; + + auto fingerprint = options.makeFingerprint(rev) + ";legacy"; + + auto cacheKey = + makeSourcePathToHashCacheKey(fingerprint, ContentAddressMethod::Raw::NixArchive, CanonPath::root); + + auto makeAccessor = [&](const auto & storePath) -> ref { + auto accessor = store.getFSAccessor(storePath); + accessor->fingerprint = fingerprint; + return ref{accessor}; + }; + + if (auto res = settings.getCache()->lookup(cacheKey)) { + auto hash = Hash::parseSRI(fetchers::getStrAttr(*res, "hash")); + auto storePath = store.makeFixedOutputPathFromCA( + "source", ContentAddressWithReferences::fromParts(ContentAddressMethod::Raw::NixArchive, hash, {})); + store.addTempRoot(storePath); + if (store.maybeQueryPathInfo(storePath)) { + debug("using cached legacy export of revision '%s'", rev.gitRev()); + return makeAccessor(storePath); + } + } + + debug("doing legacy export of revision '%s'", rev.gitRev()); + + auto tmpDir = createTempDir(); + AutoDelete delTmpDir(tmpDir, true); + + auto storePath = + options.submodules + ? 
[&]() { + // Nix < 2.20 used `git checkout` for repos with submodules. + StringSink sink; // don't pollute stdout + runProgram2({.program = "git", .args = {"init", tmpDir, "-b", "master"}, .standardOut = &sink}); + runProgram2( + {.program = "git", + .args = {"-C", tmpDir, "remote", "add", "origin", repoDir}, + .standardOut = &sink}); + runProgram2( + {.program = "git", + .args = {"-C", tmpDir, "fetch", "--quiet", "origin", rev.gitRev()}, + .standardOut = &sink}); + runProgram2( + {.program = "git", + .args = {"-C", tmpDir, "checkout", "--quiet", rev.gitRev()}, + .standardOut = &sink}); + PathFilter filter = [&](const std::string & path) { return baseNameOf(path) != ".git"; }; + return store.addToStore( + "source", + {getFSSourceAccessor(), CanonPath(tmpDir.string())}, + ContentAddressMethod::Raw::NixArchive, + HashAlgorithm::SHA256, + {}, + filter); + }() + : [&]() { + // Nix < 2.20 used `git archive` for repos without submodules. + auto source = sinkToSource([&](Sink & sink) { + runProgram2( + {.program = "git", + .args = {"-C", repoDir, "--git-dir", repoInfo.gitDir, "archive", rev.gitRev()}, + .standardOut = &sink}); + }); + + unpackTarfile(*source, tmpDir); + + return store.addToStore("source", {getFSSourceAccessor(), CanonPath(tmpDir.string())}); + }(); + + settings.getCache()->upsert( + cacheKey, {{"hash", store.queryPathInfo(storePath)->narHash.to_string(HashFormat::SRI, true)}}); + + return makeAccessor(storePath); + } + + std::optional, Input>> getAccessorFromCommit( + const Settings & settings, Store & store, RepoInfo & repoInfo, Input && input, bool fastOnly) const { assert(!repoInfo.workdirInfo.isDirty); auto origRev = input.getRev(); auto originalRef = input.getRef(); - bool shallow = getShallowAttr(input); + bool shallow = canDoShallow(input); auto ref = originalRef ? 
*originalRef : getDefaultRef(settings, repoInfo, shallow); input.attrs.insert_or_assign("ref", ref); @@ -816,11 +921,27 @@ struct GitInputScheme : InputScheme if (!input.getRev()) input.attrs.insert_or_assign("rev", GitRepo::openRepo(repoDir, {})->resolveRef(ref).gitRev()); } else { + auto rev = input.getRev(); auto repoUrl = std::get(repoInfo.location); std::filesystem::path cacheDir = getCachePath(repoUrl.to_string(), shallow); repoDir = cacheDir; repoInfo.gitDir = "."; + /* If shallow = false, but we have a non-shallow repo that already contains the desired rev, then use that + * repo instead. */ + std::filesystem::path cacheDirNonShallow = getCachePath(repoUrl.to_string(), false); + if (rev && shallow && pathExists(cacheDirNonShallow)) { + auto nonShallowRepo = GitRepo::openRepo(cacheDirNonShallow, {.create = true, .bare = true}); + if (nonShallowRepo->hasObject(*rev)) { + debug( + "using non-shallow cached repo for '%s' since it contains rev '%s'", + repoUrl.to_string(), + rev->gitRev()); + repoDir = cacheDirNonShallow; + goto have_rev; + } + } + std::filesystem::create_directories(cacheDir.parent_path()); PathLocks cacheDirLock({cacheDir.string()}); @@ -836,7 +957,7 @@ struct GitInputScheme : InputScheme /* If a rev was specified, we need to fetch if it's not in the repo. */ - if (auto rev = input.getRev()) { + if (rev) { doFetch = !repo->hasObject(*rev); } else { if (getAllRefsAttr(input)) { @@ -850,7 +971,9 @@ struct GitInputScheme : InputScheme } if (doFetch) { - bool shallow = getShallowAttr(input); + if (fastOnly) + return std::nullopt; + try { auto fetchRef = getAllRefsAttr(input) ? "refs/*:refs/*" : input.getRev() ? input.getRev()->gitRev() @@ -878,7 +1001,7 @@ struct GitInputScheme : InputScheme warn("could not update cached head '%s' for '%s'", ref, repoInfo.locationToArg()); } - if (auto rev = input.getRev()) { + if (rev) { if (!repo->hasObject(*rev)) throw Error( "Cannot find Git revision '%s' in ref '%s' of repository '%s'! 
" @@ -895,40 +1018,88 @@ struct GitInputScheme : InputScheme // the remainder } + have_rev: auto repo = GitRepo::openRepo(repoDir, {}); - auto isShallow = repo->isShallow(); - - if (isShallow && !getShallowAttr(input)) - throw Error( - "'%s' is a shallow Git repository, but shallow repositories are only allowed when `shallow = true;` is specified", - repoInfo.locationToArg()); - // FIXME: check whether rev is an ancestor of ref? auto rev = *input.getRev(); - input.attrs.insert_or_assign("lastModified", getLastModified(settings, repoInfo, repoDir, rev)); + /* Skip lastModified computation if it's already supplied by the caller. + We don't care if they specify an incorrect value; it doesn't + matter for security, unlike narHash. */ + if (!input.attrs.contains("lastModified")) + input.attrs.insert_or_assign("lastModified", getLastModified(settings, repoInfo, repoDir, rev)); + + /* Like lastModified, skip revCount if supplied by the caller. */ + if (!shallow && !input.attrs.contains("revCount")) { + auto isShallow = repo->isShallow(); + + if (isShallow && !shallow) + throw Error( + "'%s' is a shallow Git repository, but shallow repositories are only allowed when `shallow = true;` is specified", + repoInfo.locationToArg()); - if (!getShallowAttr(input)) input.attrs.insert_or_assign("revCount", getRevCount(settings, repoInfo, repoDir, rev)); + } printTalkative("using revision %s of repo '%s'", rev.gitRev(), repoInfo.locationToArg()); verifyCommit(input, repo); - bool exportIgnore = getExportIgnoreAttr(input); - bool smudgeLfs = getLfsAttr(input); - auto accessor = repo->getAccessor( - rev, {.exportIgnore = exportIgnore, .smudgeLfs = smudgeLfs}, "«" + input.to_string() + "»"); + auto options = getGitAccessorOptions(input); + + auto expectedNarHash = input.getNarHash(); + + auto accessor = repo->getAccessor(rev, options, "«" + input.to_string(true) + "»"); + + if (settings.nix219Compat && !options.smudgeLfs) { + /* Use Nix 2.19 semantics to generate locks, but if a NAR 
hash is specified, support Nix >= 2.20 semantics + * as well. */ + warn("Using Nix 2.19 semantics to export Git repository '%s'.", input.to_string()); + auto accessorModern = accessor; + accessor = getLegacyGitAccessor(settings, store, repoInfo, repoDir, rev, options); + if (expectedNarHash) { + auto narHashLegacy = + fetchToStore2(settings, store, {accessor}, FetchMode::DryRun, input.getName()).second; + if (expectedNarHash != narHashLegacy) { + auto narHashModern = + fetchToStore2(settings, store, {accessorModern}, FetchMode::DryRun, input.getName()).second; + if (expectedNarHash == narHashModern) + accessor = accessorModern; + } + } + } else { + /* Backward compatibility hack for locks produced by Nix < 2.20 that depend on Nix applying Git filters, + * `export-ignore` or `export-subst`. Nix >= 2.20 doesn't do those, so we may get a NAR hash mismatch. If + * that happens, try again using `git archive`. */ + if (expectedNarHash) { + auto narHashNew = fetchToStore2(settings, store, {accessor}, FetchMode::DryRun, input.getName()).second; + if (expectedNarHash != narHashNew) { + auto accessorLegacy = getLegacyGitAccessor(settings, store, repoInfo, repoDir, rev, options); + auto narHashLegacy = + fetchToStore2(settings, store, {accessorLegacy}, FetchMode::DryRun, input.getName()).second; + if (expectedNarHash == narHashLegacy) { + warn( + "Git input '%s' specifies a NAR hash '%s' that was created by Nix < 2.20.\n" + "Nix >= 2.20 does not apply Git filters, `export-ignore` and `export-subst` by default, which changes the NAR hash.\n" + "Please update the NAR hash to '%s'.", + input.to_string(), + expectedNarHash->to_string(HashFormat::SRI, true), + narHashNew.to_string(HashFormat::SRI, true)); + accessor = accessorLegacy; + } + } + } + } /* If the repo has submodules, fetch them and return a mounted input accessor consisting of the accessor for the top-level repo and the accessors for the submodules. 
*/ - if (getSubmodulesAttr(input)) { + if (options.submodules) { std::map> mounts; - for (auto & [submodule, submoduleRev] : repo->getSubmodules(rev, exportIgnore)) { + for (auto & [submodule, submoduleRev] : repo->getSubmodules(rev, options.exportIgnore)) { auto resolved = repo->resolveSubmoduleUrl(submodule.url); debug( "Git submodule %s: %s %s %s -> %s", @@ -951,25 +1122,27 @@ struct GitInputScheme : InputScheme } } attrs.insert_or_assign("rev", submoduleRev.gitRev()); - attrs.insert_or_assign("exportIgnore", Explicit{exportIgnore}); + attrs.insert_or_assign("exportIgnore", Explicit{options.exportIgnore}); attrs.insert_or_assign("submodules", Explicit{true}); - attrs.insert_or_assign("lfs", Explicit{smudgeLfs}); + attrs.insert_or_assign("lfs", Explicit{options.smudgeLfs}); attrs.insert_or_assign("allRefs", Explicit{true}); auto submoduleInput = fetchers::Input::fromAttrs(settings, std::move(attrs)); auto [submoduleAccessor, submoduleInput2] = submoduleInput.getAccessor(settings, store); - submoduleAccessor->setPathDisplay("«" + submoduleInput.to_string() + "»"); + submoduleAccessor->setPathDisplay("«" + submoduleInput.to_string(true) + "»"); mounts.insert_or_assign(submodule.path, submoduleAccessor); } if (!mounts.empty()) { + auto newFingerprint = accessor->getFingerprint(CanonPath::root).second->append(";s"); mounts.insert_or_assign(CanonPath::root, accessor); accessor = makeMountedSourceAccessor(std::move(mounts)); + accessor->fingerprint = newFingerprint; } } assert(!origRev || origRev == rev); - return {accessor, std::move(input)}; + return {{accessor, std::move(input)}}; } std::pair, Input> @@ -1007,7 +1180,7 @@ struct GitInputScheme : InputScheme auto submoduleInput = fetchers::Input::fromAttrs(settings, std::move(attrs)); auto [submoduleAccessor, submoduleInput2] = submoduleInput.getAccessor(settings, store); - submoduleAccessor->setPathDisplay("«" + submoduleInput.to_string() + "»"); + submoduleAccessor->setPathDisplay("«" + 
submoduleInput.to_string(true) + "»"); /* If the submodule is dirty, mark this repo dirty as well. */ @@ -1056,8 +1229,8 @@ struct GitInputScheme : InputScheme return {accessor, std::move(input)}; } - std::pair, Input> - getAccessor(const Settings & settings, Store & store, const Input & _input) const override + std::optional, Input>> + getAccessor(const Settings & settings, Store & store, const Input & _input, bool fastOnly) const override { Input input(_input); @@ -1072,22 +1245,19 @@ struct GitInputScheme : InputScheme throw UnimplementedError("exportIgnore and submodules are not supported together yet"); } - auto [accessor, final] = input.getRef() || input.getRev() || !repoInfo.getPath() - ? getAccessorFromCommit(settings, store, repoInfo, std::move(input)) - : getAccessorFromWorkdir(settings, store, repoInfo, std::move(input)); - - return {accessor, std::move(final)}; + return input.getRef() || input.getRev() || !repoInfo.getPath() + ? getAccessorFromCommit(settings, store, repoInfo, std::move(input), fastOnly) + : std::optional{getAccessorFromWorkdir(settings, store, repoInfo, std::move(input))}; } std::optional getFingerprint(Store & store, const Input & input) const override { - auto makeFingerprint = [&](const Hash & rev) { - return rev.gitRev() + (getSubmodulesAttr(input) ? ";s" : "") + (getExportIgnoreAttr(input) ? ";e" : "") - + (getLfsAttr(input) ? ";l" : ""); - }; + auto options = getGitAccessorOptions(input); if (auto rev = input.getRev()) - return makeFingerprint(*rev); + // FIXME: this can return a wrong fingerprint for the legacy (`git archive`) case, since we don't know here + // whether to append the `;legacy` suffix or not. 
+ return options.makeFingerprint(*rev); else { auto repoInfo = getRepoInfo(input); if (auto repoPath = repoInfo.getPath(); repoPath && repoInfo.workdirInfo.submodules.empty()) { @@ -1103,7 +1273,7 @@ struct GitInputScheme : InputScheme writeString("deleted:", hashSink); writeString(file.abs(), hashSink); } - return makeFingerprint(repoInfo.workdirInfo.headRev.value_or(nullRev)) + return options.makeFingerprint(repoInfo.workdirInfo.headRev.value_or(nullRev)) + ";d=" + hashSink.finish().hash.to_string(HashFormat::Base16, false); } return std::nullopt; diff --git a/src/libfetchers/github.cc b/src/libfetchers/github.cc index b86fa926a668..284620985f8a 100644 --- a/src/libfetchers/github.cc +++ b/src/libfetchers/github.cc @@ -162,7 +162,7 @@ struct GitArchiveInputScheme : InputScheme return input; } - ParsedURL toURL(const Input & input) const override + ParsedURL toURL(const Input & input, bool abbreviate) const override { auto owner = getStrAttr(input.attrs, "owner"); auto repo = getStrAttr(input.attrs, "repo"); @@ -173,7 +173,7 @@ struct GitArchiveInputScheme : InputScheme if (ref) path.push_back(*ref); if (rev) - path.push_back(rev->to_string(HashFormat::Base16, false)); + path.push_back(abbreviate ? 
rev->gitShortRev() : rev->gitRev()); auto url = ParsedURL{ .scheme = std::string{schemeName()}, .path = path, @@ -271,7 +271,8 @@ struct GitArchiveInputScheme : InputScheme time_t lastModified; }; - std::pair downloadArchive(const Settings & settings, Store & store, Input input) const + std::optional> + downloadArchive(const Settings & settings, Store & store, Input input, bool fastOnly) const { if (!maybeGetStrAttr(input.attrs, "ref")) input.attrs.insert_or_assign("ref", "HEAD"); @@ -299,12 +300,16 @@ struct GitArchiveInputScheme : InputScheme auto treeHash = getRevAttr(*treeHashAttrs, "treeHash"); auto lastModified = getIntAttr(*lastModifiedAttrs, "lastModified"); if (settings.getTarballCache()->hasObject(treeHash)) - return {std::move(input), TarballInfo{.treeHash = treeHash, .lastModified = (time_t) lastModified}}; + return { + {std::move(input), TarballInfo{.treeHash = treeHash, .lastModified = (time_t) lastModified}}}; else debug("Git tree with hash '%s' has disappeared from the cache, refetching...", treeHash.gitRev()); } } + if (fastOnly) + return std::nullopt; + /* Stream the tarball into the tarball cache. 
*/ auto url = getDownloadUrl(settings, input); @@ -340,13 +345,17 @@ struct GitArchiveInputScheme : InputScheme rev->gitRev(), input.to_string(), upstreamTreeHash->gitRev(), tarballInfo.treeHash.gitRev()); #endif - return {std::move(input), tarballInfo}; + return {{std::move(input), tarballInfo}}; } - std::pair, Input> - getAccessor(const Settings & settings, Store & store, const Input & _input) const override + std::optional, Input>> + getAccessor(const Settings & settings, Store & store, const Input & _input, bool fastOnly) const override { - auto [input, tarballInfo] = downloadArchive(settings, store, _input); + auto res = downloadArchive(settings, store, _input, fastOnly); + if (fastOnly && !res) + return std::nullopt; + assert(res); + auto [input, tarballInfo] = *res; #if 0 input.attrs.insert_or_assign("treeHash", tarballInfo.treeHash.gitRev()); @@ -354,9 +363,16 @@ struct GitArchiveInputScheme : InputScheme input.attrs.insert_or_assign("lastModified", uint64_t(tarballInfo.lastModified)); auto accessor = - settings.getTarballCache()->getAccessor(tarballInfo.treeHash, {}, "«" + input.to_string() + "»"); + settings.getTarballCache()->getAccessor(tarballInfo.treeHash, {}, "«" + input.to_string(true) + "»"); + + if (!settings.trustTarballsFromGitForges) + // FIXME: computing the NAR hash here is wasteful if + // copyInputToStore() is just going to hash/copy it as + // well. 
+ input.attrs.insert_or_assign( + "narHash", accessor->hashPath(CanonPath::root).to_string(HashFormat::SRI, true)); - return {accessor, input}; + return {{accessor, input}}; } bool isLocked(const Settings & settings, const Input & input) const override @@ -368,15 +384,10 @@ struct GitArchiveInputScheme : InputScheme return input.getRev().has_value() && (settings.trustTarballsFromGitForges || input.getNarHash().has_value()); } - std::optional experimentalFeature() const override - { - return Xp::Flakes; - } - std::optional getFingerprint(Store & store, const Input & input) const override { if (auto rev = input.getRev()) - return rev->gitRev(); + return "github:" + rev->gitRev(); else return std::nullopt; } @@ -454,8 +465,7 @@ struct GitHubInputScheme : GitArchiveInputScheme : headers.empty() ? "https://%s/%s/%s/archive/%s.tar.gz" : "https://api.%s/repos/%s/%s/tarball/%s"; - const auto url = - fmt(urlFmt, host, getOwner(input), getRepo(input), input.getRev()->to_string(HashFormat::Base16, false)); + const auto url = fmt(urlFmt, host, getOwner(input), getRepo(input), input.getRev()->gitRev()); return DownloadUrl{parseURL(url), headers}; } @@ -542,7 +552,7 @@ struct GitLabInputScheme : GitArchiveInputScheme host, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo"), - input.getRev()->to_string(HashFormat::Base16, false)); + input.getRev()->gitRev()); Headers headers = makeHeadersWithAuthTokens(settings, host, input); return DownloadUrl{parseURL(url), headers}; @@ -638,7 +648,7 @@ struct SourceHutInputScheme : GitArchiveInputScheme host, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo"), - input.getRev()->to_string(HashFormat::Base16, false)); + input.getRev()->gitRev()); Headers headers = makeHeadersWithAuthTokens(settings, host, input); return DownloadUrl{parseURL(url), headers}; diff --git a/src/libfetchers/include/nix/fetchers/cache.hh b/src/libfetchers/include/nix/fetchers/cache.hh index 7219635ec07d..8cac076f1f20 100644 --- 
a/src/libfetchers/include/nix/fetchers/cache.hh +++ b/src/libfetchers/include/nix/fetchers/cache.hh @@ -67,9 +67,9 @@ struct Cache /** * Look up a store path in the cache. The returned store path will - * be valid, but it may be expired. + * be valid (unless `allowInvalid` is true), but it may be expired. */ - virtual std::optional lookupStorePath(Key key, Store & store) = 0; + virtual std::optional lookupStorePath(Key key, Store & store, bool allowInvalid = false) = 0; /** * Look up a store path in the cache. Return nothing if its TTL diff --git a/src/libfetchers/include/nix/fetchers/fetch-settings.hh b/src/libfetchers/include/nix/fetchers/fetch-settings.hh index 2ab215a685ff..0e7edaa00e68 100644 --- a/src/libfetchers/include/nix/fetchers/fetch-settings.hh +++ b/src/libfetchers/include/nix/fetchers/fetch-settings.hh @@ -94,10 +94,7 @@ struct Settings : public Config are subsequently modified. Therefore lock files with dirty locks should generally only be used for local testing, and should not be pushed to other users. - )", - {}, - true, - Xp::Flakes}; + )"}; Setting trustTarballsFromGitForges{ this, @@ -118,16 +115,23 @@ struct Settings : public Config Setting flakeRegistry{ this, - "https://channels.nixos.org/flake-registry.json", + "https://install.determinate.systems/flake-registry/stable/flake-registry.json", "flake-registry", R"( Path or URI of the global flake registry. When empty, disables the global flake registry. - )", - {}, - true, - Xp::Flakes}; + )"}; + + Setting nix219Compat{ + this, + false, + "nix-219-compat", + R"( + If enabled, Nix will generate lock files that are compatible with Nix 2.19. + In particular, Nix will use `git archive` rather than `libgit2` to copy Git inputs. + The resulting locks may not be compatible with Nix >= 2.20. 
+ )"}; Setting tarballTtl{ this, @@ -154,6 +158,17 @@ struct Settings : public Config private: mutable Sync> _cache; + + mutable Sync> _tarballCache; }; } // namespace nix::fetchers + +namespace nix { + +/** + * @todo Get rid of global setttings variables + */ +extern fetchers::Settings fetchSettings; + +} // namespace nix diff --git a/src/libfetchers/include/nix/fetchers/fetchers.hh b/src/libfetchers/include/nix/fetchers/fetchers.hh index 180d10e9dbbf..d830d83c840a 100644 --- a/src/libfetchers/include/nix/fetchers/fetchers.hh +++ b/src/libfetchers/include/nix/fetchers/fetchers.hh @@ -61,11 +61,11 @@ public: */ static Input fromAttrs(const Settings & settings, Attrs && attrs); - ParsedURL toURL() const; + ParsedURL toURL(bool abbreviate = false) const; - std::string toURLString(const StringMap & extraQuery = {}) const; + std::string toURLString(const StringMap & extraQuery = {}, bool abbreviate = false) const; - std::string to_string() const; + std::string to_string(bool abbreviate = false) const; Attrs toAttrs() const; @@ -113,7 +113,7 @@ public: * Fetch the entire input into the Nix store, returning the * location in the Nix store and the locked input. 
*/ - std::pair fetchToStore(const Settings & settings, Store & store) const; + std::tuple, Input> fetchToStore(const Settings & settings, Store & store) const; /** * Check the locking attributes in `result` against @@ -225,7 +225,7 @@ struct InputScheme */ virtual const std::map & allowedAttrs() const = 0; - virtual ParsedURL toURL(const Input & input) const; + virtual ParsedURL toURL(const Input & input, bool abbreviate = false) const; virtual Input applyOverrides(const Input & input, std::optional ref, std::optional rev) const; @@ -240,8 +240,19 @@ struct InputScheme std::string_view contents, std::optional commitMsg) const; + virtual std::optional, Input>> + getAccessor(const Settings & settings, Store & store, const Input & input, bool fastOnly) const + { + if (fastOnly) + return std::nullopt; + return getAccessor(settings, store, input); + } + virtual std::pair, Input> - getAccessor(const Settings & settings, Store & store, const Input & input) const = 0; + getAccessor(const Settings & settings, Store & store, const Input & input) const + { + return getAccessor(settings, store, input, false).value(); + } /** * Is this `InputScheme` part of an experimental feature? 
diff --git a/src/libfetchers/include/nix/fetchers/filtering-source-accessor.hh b/src/libfetchers/include/nix/fetchers/filtering-source-accessor.hh index 98532c4b14c2..67fb29228ea8 100644 --- a/src/libfetchers/include/nix/fetchers/filtering-source-accessor.hh +++ b/src/libfetchers/include/nix/fetchers/filtering-source-accessor.hh @@ -52,6 +52,8 @@ struct FilteringSourceAccessor : SourceAccessor std::pair> getFingerprint(const CanonPath & path) override; + std::shared_ptr getProvenance(const CanonPath & path) override; + void invalidateCache(const CanonPath & path) override; /** diff --git a/src/libfetchers/include/nix/fetchers/git-utils.hh b/src/libfetchers/include/nix/fetchers/git-utils.hh index 24a7b80087f8..eada8745c3eb 100644 --- a/src/libfetchers/include/nix/fetchers/git-utils.hh +++ b/src/libfetchers/include/nix/fetchers/git-utils.hh @@ -26,6 +26,9 @@ struct GitAccessorOptions { bool exportIgnore = false; bool smudgeLfs = false; + bool submodules = false; // Currently implemented in GitInputScheme rather than GitAccessor + + std::string makeFingerprint(const Hash & rev) const; }; struct GitRepo diff --git a/src/libfetchers/include/nix/fetchers/meson.build b/src/libfetchers/include/nix/fetchers/meson.build index a313b1e0bc0c..f3bb80942a28 100644 --- a/src/libfetchers/include/nix/fetchers/meson.build +++ b/src/libfetchers/include/nix/fetchers/meson.build @@ -10,6 +10,7 @@ headers = files( 'git-lfs-fetch.hh', 'git-utils.hh', 'input-cache.hh', + 'provenance.hh', 'registry.hh', 'tarball.hh', ) diff --git a/src/libfetchers/include/nix/fetchers/provenance.hh b/src/libfetchers/include/nix/fetchers/provenance.hh new file mode 100644 index 000000000000..82dc1b3b8109 --- /dev/null +++ b/src/libfetchers/include/nix/fetchers/provenance.hh @@ -0,0 +1,31 @@ +#pragma once + +#include "nix/util/provenance.hh" +#include "nix/fetchers/fetchers.hh" + +namespace nix { + +struct TreeProvenance : Provenance +{ + ref attrs; + + TreeProvenance(const fetchers::Input & input); + + 
TreeProvenance(ref attrs) + : attrs(std::move(attrs)) + { + } + + nlohmann::json to_json() const override; +}; + +struct FetchurlProvenance : Provenance +{ + std::string url; + + FetchurlProvenance(std::string url, bool sanitize = true); + + nlohmann::json to_json() const override; +}; + +} // namespace nix diff --git a/src/libfetchers/include/nix/fetchers/registry.hh b/src/libfetchers/include/nix/fetchers/registry.hh index dc7e3edb590e..ca38dd805d6d 100644 --- a/src/libfetchers/include/nix/fetchers/registry.hh +++ b/src/libfetchers/include/nix/fetchers/registry.hh @@ -39,6 +39,9 @@ struct Registry static std::shared_ptr read(const Settings & settings, const SourcePath & path, RegistryType type); + static std::shared_ptr + read(const Settings & settings, std::string_view whence, std::string_view jsonStr, RegistryType type); + void write(const std::filesystem::path & path); void add(const Input & from, const Input & to, const Attrs & extraAttrs); diff --git a/src/libfetchers/indirect.cc b/src/libfetchers/indirect.cc index b2a41a7421fd..e629dcbac6b0 100644 --- a/src/libfetchers/indirect.cc +++ b/src/libfetchers/indirect.cc @@ -100,7 +100,7 @@ struct IndirectInputScheme : InputScheme return input; } - ParsedURL toURL(const Input & input) const override + ParsedURL toURL(const Input & input, bool abbreviate) const override { ParsedURL url{ .scheme = "flake", @@ -131,11 +131,6 @@ struct IndirectInputScheme : InputScheme throw Error("indirect input '%s' cannot be fetched directly", input.to_string()); } - std::optional experimentalFeature() const override - { - return Xp::Flakes; - } - bool isDirect(const Input & input) const override { return false; diff --git a/src/libfetchers/mercurial.cc b/src/libfetchers/mercurial.cc index 38fe31fcadea..a53025dd7ff9 100644 --- a/src/libfetchers/mercurial.cc +++ b/src/libfetchers/mercurial.cc @@ -125,7 +125,7 @@ struct MercurialInputScheme : InputScheme return input; } - ParsedURL toURL(const Input & input) const override + ParsedURL 
toURL(const Input & input, bool abbreviate) const override { auto url = parseURL(getStrAttr(input.attrs, "url")); url.scheme = "hg+" + url.scheme; @@ -282,9 +282,7 @@ struct MercurialInputScheme : InputScheme auto revInfoKey = [&](const Hash & rev) { if (rev.algo != HashAlgorithm::SHA1) - throw Error( - "Hash '%s' is not supported by Mercurial. Only sha1 is supported.", - rev.to_string(HashFormat::Base16, true)); + throw Error("Hash '%s' is not supported by Mercurial. Only sha1 is supported.", rev.gitRev()); return Cache::Key{"hgRev", {{"store", store.storeDir}, {"name", name}, {"rev", input.getRev()->gitRev()}}}; }; @@ -406,7 +404,7 @@ struct MercurialInputScheme : InputScheme auto storePath = fetchToStore(settings, store, input); auto accessor = store.requireStoreObjectAccessor(storePath); - accessor->setPathDisplay("«" + input.to_string() + "»"); + accessor->setPathDisplay("«" + input.to_string(true) + "»"); return {accessor, input}; } @@ -419,7 +417,7 @@ struct MercurialInputScheme : InputScheme std::optional getFingerprint(Store & store, const Input & input) const override { if (auto rev = input.getRev()) - return rev->gitRev(); + return "hg:" + rev->gitRev(); else return std::nullopt; } diff --git a/src/libfetchers/meson.build b/src/libfetchers/meson.build index d34dd4f434d1..2a3d356fe1c7 100644 --- a/src/libfetchers/meson.build +++ b/src/libfetchers/meson.build @@ -7,6 +7,8 @@ project( # TODO(Qyriad): increase the warning level 'warning_level=1', 'errorlogs=true', # Please print logs for tests that fail + 'unity=on', + 'unity_size=1024', ], meson_version : '>= 1.1', license : 'LGPL-2.1-or-later', @@ -35,6 +37,7 @@ subdir('nix-meson-build-support/common') sources = files( 'attrs.cc', + 'builtin.cc', 'cache.cc', 'fetch-settings.cc', 'fetch-to-store.cc', @@ -48,12 +51,20 @@ sources = files( 'input-cache.cc', 'mercurial.cc', 'path.cc', + 'provenance.cc', 'registry.cc', 'tarball.cc', ) subdir('include/nix/fetchers') +# Generate builtin-flake-registry.json.gen.hh 
+subdir('nix-meson-build-support/generate-header') + +sources += gen_header.process( + 'builtin-flake-registry.json', +) + subdir('nix-meson-build-support/export-all-symbols') subdir('nix-meson-build-support/windows-version') @@ -64,7 +75,7 @@ this_library = library( dependencies : deps_public + deps_private + deps_other, include_directories : include_dirs, link_args : linker_export_flags, - prelink : true, # For C++ static initializers + prelink : prelink, # For C++ static initializers install : true, cpp_pch : do_pch ? [ 'pch/precompiled-headers.hh' ] : [], ) diff --git a/src/libfetchers/package.nix b/src/libfetchers/package.nix index 14592087999c..1a30ac293018 100644 --- a/src/libfetchers/package.nix +++ b/src/libfetchers/package.nix @@ -17,7 +17,7 @@ let in mkMesonLibrary (finalAttrs: { - pname = "nix-fetchers"; + pname = "determinate-nix-fetchers"; inherit version; workDir = ./.; @@ -28,6 +28,7 @@ mkMesonLibrary (finalAttrs: { ./.version ./meson.build ./include/nix/fetchers/meson.build + ./builtin-flake-registry.json (fileset.fileFilter (file: file.hasExt "cc") ./.) (fileset.fileFilter (file: file.hasExt "hh") ./.) 
]; diff --git a/src/libfetchers/path.cc b/src/libfetchers/path.cc index cf7f6aa920dd..30f02c731f7f 100644 --- a/src/libfetchers/path.cc +++ b/src/libfetchers/path.cc @@ -87,7 +87,7 @@ struct PathInputScheme : InputScheme return input; } - ParsedURL toURL(const Input & input) const override + ParsedURL toURL(const Input & input, bool abbreviate) const override { auto query = attrsToQuery(input.attrs); query.erase("path"); @@ -138,51 +138,37 @@ struct PathInputScheme : InputScheme throw Error("cannot fetch input '%s' because it uses a relative path", input.to_string()); } - std::pair, Input> - getAccessor(const Settings & settings, Store & store, const Input & _input) const override + std::optional, Input>> + getAccessor(const Settings & settings, Store & store, const Input & _input, bool fastOnly) const override { + // Note: fastOnly is ignored because the path fetcher is always fast. + Input input(_input); - auto path = getStrAttr(input.attrs, "path"); auto absPath = getAbsPath(input); // FIXME: check whether access to 'path' is allowed. + + auto accessor = makeFSSourceAccessor(absPath); + auto storePath = store.maybeParseStorePath(absPath.string()); - if (storePath) + if (storePath) { store.addTempRoot(*storePath); - time_t mtime = 0; - if (!storePath || storePath->name() != "source" || !store.isValidPath(*storePath)) { - Activity act(*logger, lvlTalkative, actUnknown, fmt("copying %s to the store", PathFmt(absPath))); - // FIXME: try to substitute storePath. - auto src = sinkToSource( - [&](Sink & sink) { mtime = dumpPathAndGetMtime(absPath.string(), sink, defaultPathFilter); }); - storePath = store.addToStoreFromDump(*src, "source"); + // To prevent `fetchToStore()` copying the path again to Nix + // store, pre-create an entry in the fetcher cache. 
+ auto info = store.maybeQueryPathInfo(*storePath); + if (info) { + accessor->fingerprint = fmt("path:%s", info->narHash.to_string(HashFormat::SRI, true)); + settings.getCache()->upsert( + makeSourcePathToHashCacheKey( + *accessor->fingerprint, ContentAddressMethod::Raw::NixArchive, CanonPath::root), + {{"hash", info->narHash.to_string(HashFormat::SRI, true)}}); + } } - auto accessor = store.requireStoreObjectAccessor(*storePath); - - // To prevent `fetchToStore()` copying the path again to Nix - // store, pre-create an entry in the fetcher cache. - auto narHash = store.queryPathInfo(*storePath)->narHash.to_string(HashFormat::SRI, true); - accessor->fingerprint = fmt("path:%s", narHash); - settings.getCache()->upsert( - makeSourcePathToHashCacheKey( - *accessor->fingerprint, ContentAddressMethod::Raw::NixArchive, CanonPath::root), - {{"hash", narHash}}); - - /* Trust the lastModified value supplied by the user, if - any. It's not a "secure" attribute so we don't care. */ - if (!input.getLastModified()) - input.attrs.insert_or_assign("lastModified", uint64_t(mtime)); - - return {accessor, std::move(input)}; - } - - std::optional experimentalFeature() const override - { - return Xp::Flakes; + return {{accessor, std::move(input)}}; } }; diff --git a/src/libfetchers/provenance.cc b/src/libfetchers/provenance.cc new file mode 100644 index 000000000000..e984b2271014 --- /dev/null +++ b/src/libfetchers/provenance.cc @@ -0,0 +1,58 @@ +#include "nix/fetchers/provenance.hh" +#include "nix/fetchers/attrs.hh" +#include "nix/util/json-utils.hh" + +#include + +namespace nix { + +TreeProvenance::TreeProvenance(const fetchers::Input & input) + : attrs(make_ref([&]() { + // Remove the narHash attribute from the provenance info, as it's redundant (it's already recorded in the store + // path info). 
+ auto attrs2 = input.attrs; + attrs2.erase("narHash"); + return fetchers::attrsToJSON(attrs2); + }())) +{ +} + +nlohmann::json TreeProvenance::to_json() const +{ + return nlohmann::json{ + {"type", "tree"}, + {"attrs", *attrs}, + }; +} + +Provenance::Register registerTreeProvenance("tree", [](nlohmann::json json) { + auto & obj = getObject(json); + auto & attrsJson = valueAt(obj, "attrs"); + return make_ref(make_ref(attrsJson)); +}); + +FetchurlProvenance::FetchurlProvenance(std::string _url, bool sanitize) + : url(std::move(_url)) +{ + if (sanitize) { + try { + url = parseURL(url, true).renderSanitized(); + } catch (BadURL &) { + } + } +} + +nlohmann::json FetchurlProvenance::to_json() const +{ + return nlohmann::json{ + {"type", "fetchurl"}, + {"url", url}, + }; +} + +Provenance::Register registerFetchurlProvenance("fetchurl", [](nlohmann::json json) { + auto & obj = getObject(json); + return make_ref(getString(valueAt(obj, "url")), false); +}); + +} // namespace nix diff --git a/src/libfetchers/registry.cc b/src/libfetchers/registry.cc index 9911586fa037..89990fe5f15c 100644 --- a/src/libfetchers/registry.cc +++ b/src/libfetchers/registry.cc @@ -14,14 +14,24 @@ std::shared_ptr Registry::read(const Settings & settings, const Source { debug("reading registry '%s'", path); - auto registry = std::make_shared(type); - if (!path.pathExists()) return std::make_shared(type); try { + return read(settings, path.to_string(), path.readFile(), type); + } catch (Error & e) { + warn("cannot read flake registry '%s': %s", path, e.what()); + return std::make_shared(type); + } +} - auto json = nlohmann::json::parse(path.readFile()); +std::shared_ptr +Registry::read(const Settings & settings, std::string_view whence, std::string_view jsonStr, RegistryType type) +{ + auto registry = std::make_shared(type); + + try { + auto json = nlohmann::json::parse(jsonStr); auto version = json.value("version", 0); @@ -45,12 +55,10 @@ std::shared_ptr Registry::read(const Settings & settings, 
const Source } else - throw Error("flake registry '%s' has unsupported version %d", path, version); + warn("flake registry '%s' has unsupported version %d", whence, version); } catch (nlohmann::json::exception & e) { - warn("cannot parse flake registry '%s': %s", path, e.what()); - } catch (Error & e) { - warn("cannot read flake registry '%s': %s", path, e.what()); + warn("cannot parse flake registry '%s': %s", whence, e.what()); } return registry; @@ -139,25 +147,39 @@ void overrideRegistry(const Input & from, const Input & to, const Attrs & extraA static std::shared_ptr getGlobalRegistry(const Settings & settings, Store & store) { static auto reg = [&]() { - auto path = settings.flakeRegistry.get(); - if (path == "") { - return std::make_shared(Registry::Global); // empty registry - } + try { + auto path = settings.flakeRegistry.get(); + if (path == "") { + return std::make_shared(Registry::Global); // empty registry + } - return Registry::read( - settings, - [&] -> SourcePath { - std::filesystem::path fsPath{path}; - if (!fsPath.is_absolute()) { - auto storePath = downloadFile(store, settings, path, "flake-registry.json").storePath; - if (auto store2 = dynamic_cast(&store)) - store2->addPermRoot(storePath, (getCacheDir() / "flake-registry.json").string()); - return {store.requireStoreObjectAccessor(storePath)}; - } else { - return SourcePath{getFSSourceAccessor(), CanonPath{fsPath.string()}}.resolveSymlinks(); - } - }(), - Registry::Global); + return Registry::read( + settings, + [&] -> SourcePath { + std::filesystem::path fsPath{path}; + if (!fsPath.is_absolute()) { + auto storePath = downloadFile(store, settings, path, "flake-registry.json").storePath; + if (auto store2 = dynamic_cast(&store)) + store2->addPermRoot(storePath, (getCacheDir() / "flake-registry.json").string()); + return {store.requireStoreObjectAccessor(storePath)}; + } else { + return SourcePath{getFSSourceAccessor(), CanonPath{fsPath.string()}}.resolveSymlinks(); + } + }(), + 
Registry::Global); + } catch (Error & e) { + warn( + "cannot fetch global flake registry '%s', will use builtin fallback registry: %s", + settings.flakeRegistry.get(), + e.info().msg); + // Use builtin registry as fallback + return Registry::read( + settings, + "builtin flake registry", +#include "builtin-flake-registry.json.gen.hh" + , + Registry::Global); + } }(); return reg; diff --git a/src/libfetchers/tarball.cc b/src/libfetchers/tarball.cc index 065174433110..5586229e56cf 100644 --- a/src/libfetchers/tarball.cc +++ b/src/libfetchers/tarball.cc @@ -9,6 +9,9 @@ #include "nix/store/store-api.hh" #include "nix/fetchers/git-utils.hh" #include "nix/fetchers/fetch-settings.hh" +#include "nix/fetchers/provenance.hh" + +#include namespace nix::fetchers { @@ -83,6 +86,8 @@ DownloadFileResult downloadFile( }, hashString(HashAlgorithm::SHA256, sink.s)); info.narSize = sink.s.size(); + if (experimentalFeatureSettings.isEnabled(Xp::Provenance)) + info.provenance = std::make_shared(request.uri.to_string()); auto source = StringSource{sink.s}; store.addToStore(info, source, NoRepair, NoCheckSigs); storePath = std::move(info.path); @@ -104,8 +109,12 @@ DownloadFileResult downloadFile( }; } -static DownloadTarballResult downloadTarball_( - const Settings & settings, const std::string & urlS, const Headers & headers, const std::string & displayPrefix) +static std::optional downloadTarball_( + const Settings & settings, + const std::string & urlS, + const Headers & headers, + const std::string & displayPrefix, + bool fastOnly) { ParsedURL url = parseURL(urlS); @@ -155,6 +164,9 @@ static DownloadTarballResult downloadTarball_( `tarballTtl`, so no need to check the server. 
*/ return attrsToResult(cached->value); + if (fastOnly) + return std::nullopt; + auto _res = std::make_shared>(); auto source = sinkToSource([&](Sink & sink) { @@ -382,7 +394,7 @@ struct CurlInputScheme : InputScheme return input; } - ParsedURL toURL(const Input & input) const override + ParsedURL toURL(const Input & input, bool abbreviate) const override { auto url = parseURL(getStrAttr(input.attrs, "url")); // NAR hashes are preferred over file hashes since tar/zip @@ -437,7 +449,7 @@ struct FileInputScheme : CurlInputScheme auto accessor = ref{store.getFSAccessor(file.storePath)}; - accessor->setPathDisplay("«" + input.to_string() + "»"); + accessor->setPathDisplay("«" + input.to_string(true) + "»"); return {accessor, input}; } @@ -487,12 +499,16 @@ struct TarballInputScheme : CurlInputScheme : (requireTree || hasTarballExtension(url))); } - std::pair, Input> - getAccessor(const Settings & settings, Store & store, const Input & _input) const override + std::optional, Input>> + getAccessor(const Settings & settings, Store & store, const Input & _input, bool fastOnly) const override { auto input(_input); - auto result = downloadTarball_(settings, getStrAttr(input.attrs, "url"), {}, "«" + input.to_string() + "»"); + auto res = + downloadTarball_(settings, getStrAttr(input.attrs, "url"), {}, "«" + input.to_string(true) + "»", fastOnly); + if (!res) + return std::nullopt; + auto & result = *res; if (result.immutableUrl) { auto immutableInput = Input::fromURL(settings, *result.immutableUrl); @@ -510,15 +526,15 @@ struct TarballInputScheme : CurlInputScheme "narHash", settings.getTarballCache()->treeHashToNarHash(settings, result.treeHash).to_string(HashFormat::SRI, true)); - return {result.accessor, input}; + return {{result.accessor, input}}; } std::optional getFingerprint(Store & store, const Input & input) const override { if (auto narHash = input.getNarHash()) - return narHash->to_string(HashFormat::SRI, true); + return "tarball:" + 
narHash->to_string(HashFormat::SRI, true); else if (auto rev = input.getRev()) - return rev->gitRev(); + return "tarball:" + rev->gitRev(); else return std::nullopt; } diff --git a/src/libflake-c/meson.build b/src/libflake-c/meson.build index fddb39bdf96b..d0055e5d98fc 100644 --- a/src/libflake-c/meson.build +++ b/src/libflake-c/meson.build @@ -57,7 +57,7 @@ this_library = library( dependencies : deps_public + deps_private + deps_other, include_directories : include_dirs, link_args : linker_export_flags, - prelink : true, # For C++ static initializers + prelink : prelink, # For C++ static initializers install : true, ) diff --git a/src/libflake-c/nix_api_flake.cc b/src/libflake-c/nix_api_flake.cc index cbf5430f54a9..2558236a7e5a 100644 --- a/src/libflake-c/nix_api_flake.cc +++ b/src/libflake-c/nix_api_flake.cc @@ -209,4 +209,20 @@ nix_value * nix_locked_flake_get_output_attrs( NIXC_CATCH_ERRS_NULL } +nix_err nix_locked_flake_read_path( + nix_c_context * context, + nix_locked_flake * lockedFlake, + const char * path, + nix_get_string_callback callback, + void * user_data) +{ + nix_clear_err(context); + try { + auto source_path = lockedFlake->lockedFlake->flake.path.parent() / nix::CanonPath(path); + auto v = source_path.readFile(); + return call_nix_get_string_callback(v, callback, user_data); + } + NIXC_CATCH_ERRS +} + } // extern "C" diff --git a/src/libflake-c/nix_api_flake.h b/src/libflake-c/nix_api_flake.h index a3221e676bec..9884a3d39a49 100644 --- a/src/libflake-c/nix_api_flake.h +++ b/src/libflake-c/nix_api_flake.h @@ -239,6 +239,23 @@ void nix_flake_reference_free(nix_flake_reference * store); nix_value * nix_locked_flake_get_output_attrs( nix_c_context * context, nix_flake_settings * settings, EvalState * evalState, nix_locked_flake * lockedFlake); +/** + * @brief Reads a file within the flake. + * @note The callback borrows the string only for the duration of the call. 
+ * + * @param[out] context Optional, stores error information + * @param[in] locked_flake the flake to get the path for + * @param[in] path The path within the flake. + * @param[in] callback The callback to call with the string + * @param[in] user_data Additional data to pass for the callback + */ +nix_err nix_locked_flake_read_path( + nix_c_context * context, + nix_locked_flake * lockedFlake, + const char * path, + nix_get_string_callback callback, + void * user_data); + #ifdef __cplusplus } // extern "C" #endif diff --git a/src/libflake-c/package.nix b/src/libflake-c/package.nix index 8c6883d9cf95..9ae3ec695154 100644 --- a/src/libflake-c/package.nix +++ b/src/libflake-c/package.nix @@ -17,7 +17,7 @@ let in mkMesonLibrary (finalAttrs: { - pname = "nix-flake-c"; + pname = "determinate-nix-flake-c"; inherit version; workDir = ./.; diff --git a/src/libflake-tests/flakeref.cc b/src/libflake-tests/flakeref.cc index 5fcbbc9c30bf..332ffe18b7dc 100644 --- a/src/libflake-tests/flakeref.cc +++ b/src/libflake-tests/flakeref.cc @@ -17,8 +17,6 @@ namespace nix { TEST(parseFlakeRef, path) { - experimentalFeatureSettings.experimentalFeatures.get().insert(Xp::Flakes); - fetchers::Settings fetchSettings; { @@ -67,8 +65,6 @@ TEST(parseFlakeRef, path) TEST(parseFlakeRef, GitArchiveInput) { - experimentalFeatureSettings.experimentalFeatures.get().insert(Xp::Flakes); - fetchers::Settings fetchSettings; { @@ -111,7 +107,6 @@ class InputFromURLTest : public ::testing::WithParamInterface #include "nix/flake/flake-primops.hh" +#include "nix/store/store-api.hh" #include "nix/expr/eval.hh" #include "nix/flake/flake.hh" #include "nix/flake/flakeref.hh" @@ -29,34 +30,62 @@ #include "nix/util/source-path.hh" #include "nix/util/types.hh" #include "nix/util/util.hh" +#include "nix/util/mounted-source-accessor.hh" namespace nix::flake::primops { PrimOp getFlake(const Settings & settings) { auto prim_getFlake = [&settings](EvalState & state, const PosIdx pos, Value ** args, Value & v) { - 
std::string flakeRefS( - state.forceStringNoCtx(*args[0], pos, "while evaluating the argument passed to builtins.getFlake")); - auto flakeRef = nix::parseFlakeRef(state.fetchSettings, flakeRefS, {}, true); - if (state.settings.pureEval && !flakeRef.input.isLocked(state.fetchSettings)) - throw Error( - "cannot call 'getFlake' on unlocked flake reference '%s', at %s (use --impure to override)", - flakeRefS, - state.positions[pos]); - - callFlake( - state, - lockFlake( - settings, - state, - flakeRef, - LockFlags{ - .updateLockFile = false, - .writeLockFile = false, - .useRegistries = !state.settings.pureEval && settings.useRegistries, - .allowUnlocked = !state.settings.pureEval, - }), - v); + state.forceValue(*args[0], pos); + + LockFlags lockFlags{ + .updateLockFile = false, + .writeLockFile = false, + .useRegistries = !state.settings.pureEval && settings.useRegistries, + .allowUnlocked = !state.settings.pureEval, + }; + + if (args[0]->type() == nPath) { + auto path = state.realisePath(pos, *args[0]); + callFlake(state, lockFlake(settings, state, path, lockFlags), v); + } else { + NixStringContext context; + std::string flakeRefS( + state.forceString(*args[0], context, pos, "while evaluating the argument passed to builtins.getFlake")); + auto rewrites = state.realiseContext(context); + flakeRefS = state.devirtualize(rewriteStrings(flakeRefS, rewrites), context); + if (hasContext(context)) + // FIXME: this should really be an error. + warn( + "In 'builtins.getFlake', the flakeref '%s' has string context, but that's not allowed. 
This may become a fatal error in the future.", + flakeRefS); + + auto flakeRef = nix::parseFlakeRef(state.fetchSettings, flakeRefS, {}, true); + if (state.settings.pureEval && !flakeRef.input.isLocked(state.fetchSettings)) + throw Error( + "cannot call 'getFlake' on unlocked flake reference '%s', at %s (use --impure to override)", + flakeRefS, + state.positions[pos]); + + /* Backward compatibility hack: If this is a `path` flake and it's a virtual path that had + * `unsafeDiscardStringContext` applied to it, then treat it like the `nPath` case, i.e. call lockFlake() on + * the virtual path directly. This is necessary because the `path` fetcher doesn't see virtual paths. */ + if (flakeRef.input.getType() == "path") { + if (auto sourcePath = flakeRef.input.getSourcePath(); + sourcePath && state.store->isInStore(sourcePath->string())) { + auto [storePath, subPath] = state.store->toStorePath(sourcePath->string()); + if (auto mount = state.storeFS->getMount(CanonPath(state.store->printStorePath(storePath)))) { + auto path = state.storePath(storePath) / CanonPath(subPath); + if (!flakeRef.subdir.empty()) + path = path / flakeRef.subdir; + return callFlake(state, lockFlake(settings, state, path, lockFlags), v); + } + } + } + + callFlake(state, lockFlake(settings, state, flakeRef, lockFlags), v); + } }; return PrimOp{ @@ -78,7 +107,6 @@ PrimOp getFlake(const Settings & settings) ``` )", .impl = prim_getFlake, - .experimentalFeature = Xp::Flakes, }; } @@ -120,13 +148,13 @@ nix::PrimOp parseFlakeRef({ ``` )", .impl = prim_parseFlakeRef, - .experimentalFeature = Xp::Flakes, }); static void prim_flakeRefToString(EvalState & state, const PosIdx pos, Value ** args, Value & v) { state.forceAttrs(*args[0], noPos, "while evaluating the argument passed to builtins.flakeRefToString"); fetchers::Attrs attrs; + NixStringContext context; for (const auto & attr : *args[0]->attrs()) { state.forceValue(*attr.value, attr.pos); auto t = attr.value->type(); @@ -145,7 +173,9 @@ static void 
prim_flakeRefToString(EvalState & state, const PosIdx pos, Value ** } else if (t == nBool) { attrs.emplace(state.symbols[attr.name], Explicit{attr.value->boolean()}); } else if (t == nString) { - attrs.emplace(state.symbols[attr.name], std::string(attr.value->string_view())); + auto s = state.forceString( + *attr.value, context, attr.pos, "while evaluating an attribute in 'builtins.flakeRefToString'"); + attrs.emplace(state.symbols[attr.name], std::string(s)); } else { state .error( @@ -157,7 +187,7 @@ static void prim_flakeRefToString(EvalState & state, const PosIdx pos, Value ** } } auto flakeRef = FlakeRef::fromAttrs(state.fetchSettings, attrs); - v.mkString(flakeRef.to_string(), state.mem); + v.mkString(flakeRef.to_string(), context, state.mem); } nix::PrimOp flakeRefToString({ @@ -181,7 +211,6 @@ nix::PrimOp flakeRefToString({ ``` )", .impl = prim_flakeRefToString, - .experimentalFeature = Xp::Flakes, }); } // namespace nix::flake::primops diff --git a/src/libflake/flake.cc b/src/libflake/flake.cc index 67522d2c4751..1dc030314e56 100644 --- a/src/libflake/flake.cc +++ b/src/libflake/flake.cc @@ -36,6 +36,7 @@ #include "nix/expr/value-to-json.hh" #include "nix/fetchers/fetch-to-store.hh" #include "nix/util/memory-source-accessor.hh" +#include "nix/util/mounted-source-accessor.hh" #include "nix/fetchers/input-cache.hh" #include "nix/expr/attr-set.hh" #include "nix/expr/eval-error.hh" @@ -66,20 +67,22 @@ namespace nix { struct SourceAccessor; using namespace flake; +using namespace fetchers; namespace flake { static void forceTrivialValue(EvalState & state, Value & value, const PosIdx pos) { - if (value.isThunk() && value.isTrivial()) + if (value.isTrivial()) state.forceValue(value, pos); } static void expectType(EvalState & state, ValueType type, Value & value, const PosIdx pos) { forceTrivialValue(state, value, pos); - if (value.type() != type) - throw Error("expected %s but got %s at %s", showType(type), showType(value.type()), state.positions[pos]); + auto t 
= value.type(); + if (t != type) + throw Error("expected %s but got %s at %s", showType(type), showType(t), state.positions[pos]); } static std::pair, fetchers::Attrs> parseFlakeInputs( @@ -90,7 +93,7 @@ static std::pair, fetchers::Attrs> parseFlakeInput const SourcePath & flakeDir, bool allowSelf); -static void parseFlakeInputAttr(EvalState & state, const Attr & attr, fetchers::Attrs & attrs) +static void parseFlakeInputAttr(EvalState & state, const nix::Attr & attr, fetchers::Attrs & attrs) { // Allow selecting a subset of enum values #pragma GCC diagnostic push @@ -144,6 +147,7 @@ static FlakeInput parseFlakeInput( auto sUrl = state.symbols.create("url"); auto sFlake = state.symbols.create("flake"); auto sFollows = state.symbols.create("follows"); + auto sBuildTime = state.symbols.create("buildTime"); fetchers::Attrs attrs; std::optional url; @@ -172,6 +176,11 @@ static FlakeInput parseFlakeInput( } else if (attr.name == sFlake) { expectType(state, nBool, *attr.value, attr.pos); input.isFlake = attr.value->boolean(); + } else if (attr.name == sBuildTime) { + expectType(state, nBool, *attr.value, attr.pos); + input.buildTime = attr.value->boolean(); + if (input.buildTime) + experimentalFeatureSettings.require(Xp::BuildTimeFetchTree); } else if (attr.name == sInputs) { input.overrides = parseFlakeInputs(state, attr.value, attr.pos, lockRootAttrPath, flakeDir, false).first; @@ -240,7 +249,7 @@ static std::pair, fetchers::Attrs> parseFlakeInput return {inputs, selfAttrs}; } -static Flake readFlake( +Flake readFlake( EvalState & state, const FlakeRef & originalRef, const FlakeRef & resolvedRef, @@ -260,6 +269,7 @@ static Flake readFlake( .resolvedRef = resolvedRef, .lockedRef = lockedRef, .path = flakePath, + .provenance = flakePath.getProvenance(), }; if (auto description = vInfo.attrs()->get(state.s.description)) { @@ -369,7 +379,8 @@ static Flake getFlake( EvalState & state, const FlakeRef & originalRef, fetchers::UseRegistries useRegistries, - const InputAttrPath 
& lockRootAttrPath) + const InputAttrPath & lockRootAttrPath, + bool requireLockable) { // Fetch a lazy tree first. auto cachedInput = @@ -401,13 +412,14 @@ static Flake getFlake( originalRef, resolvedRef, lockedRef, - state.storePath(state.mountInput(lockedRef.input, originalRef.input, cachedInput.accessor)), + state.storePath(state.mountInput(lockedRef.input, originalRef.input, cachedInput.accessor, requireLockable)), lockRootAttrPath); } -Flake getFlake(EvalState & state, const FlakeRef & originalRef, fetchers::UseRegistries useRegistries) +Flake getFlake( + EvalState & state, const FlakeRef & originalRef, fetchers::UseRegistries useRegistries, bool requireLockable) { - return getFlake(state, originalRef, useRegistries, {}); + return getFlake(state, originalRef, useRegistries, {}, requireLockable); } static LockFile readLockFile(const fetchers::Settings & fetchSettings, const SourcePath & lockFilePath) @@ -416,19 +428,13 @@ static LockFile readLockFile(const fetchers::Settings & fetchSettings, const Sou : LockFile(); } -/* Compute an in-memory lock file for the specified top-level flake, - and optionally write it to file, if the flake is writable. */ -LockedFlake -lockFlake(const Settings & settings, EvalState & state, const FlakeRef & topRef, const LockFlags & lockFlags) +LockedFlake lockFlake( + const Settings & settings, EvalState & state, const FlakeRef & topRef, const LockFlags & lockFlags, Flake flake) { - experimentalFeatureSettings.require(Xp::Flakes); - auto useRegistries = lockFlags.useRegistries.value_or(settings.useRegistries); auto useRegistriesTop = useRegistries ? fetchers::UseRegistries::All : fetchers::UseRegistries::No; auto useRegistriesInputs = useRegistries ? 
fetchers::UseRegistries::Limited : fetchers::UseRegistries::No; - auto flake = getFlake(state, topRef, useRegistriesTop, {}); - if (lockFlags.applyNixConfig) { flake.config.apply(settings); state.store->setOptions(); @@ -607,7 +613,7 @@ lockFlake(const Settings & settings, EvalState & state, const FlakeRef & topRef, if (auto resolvedPath = resolveRelativePath()) { return readFlake(state, ref, ref, ref, *resolvedPath, inputAttrPath); } else { - return getFlake(state, ref, useRegistries, inputAttrPath); + return getFlake(state, ref, useRegistriesInputs, inputAttrPath, true); } }; @@ -630,7 +636,11 @@ lockFlake(const Settings & settings, EvalState & state, const FlakeRef & topRef, didn't change and there is no override from a higher level flake. */ auto childNode = make_ref( - oldLock->lockedRef, oldLock->originalRef, oldLock->isFlake, oldLock->parentInputAttrPath); + oldLock->lockedRef, + oldLock->originalRef, + oldLock->isFlake, + oldLock->buildTime, + oldLock->parentInputAttrPath); node->inputs.insert_or_assign(id, childNode); @@ -719,12 +729,34 @@ lockFlake(const Settings & settings, EvalState & state, const FlakeRef & topRef, auto inputIsOverride = explicitCliOverrides.contains(nonEmptyInputAttrPath); auto ref = (input2.ref && inputIsOverride) ? *input2.ref : *input.ref; + /* Warn against the use of indirect flakerefs + (but only at top-level since we don't want + to annoy users about flakes that are not + under their control). */ + auto warnRegistry = [&](const FlakeRef & resolvedRef) { + if (inputAttrPath.size() == 1 && !input.ref->input.isDirect()) { + std::ostringstream s; + printLiteralString(s, resolvedRef.to_string()); + warn( + "Flake input '%1%' uses the flake registry. " + "Using the registry in flake inputs is deprecated in Determinate Nix. 
" + "To make your flake future-proof, add the following to '%2%':\n" + "\n" + " inputs.%1%.url = %3%;\n" + "\n" + "For more information, see: https://github.com/DeterminateSystems/nix-src/issues/37", + inputAttrPathS, + flake.path, + s.str()); + } + }; + if (input.isFlake) { auto inputFlake = getInputFlake( *input.ref, inputIsOverride ? fetchers::UseRegistries::All : useRegistriesInputs); - auto childNode = - make_ref(inputFlake.lockedRef, ref, true, overriddenParentPath); + auto childNode = make_ref( + inputFlake.lockedRef, ref, true, input.buildTime, overriddenParentPath); node->inputs.insert_or_assign(id, childNode); @@ -746,6 +778,8 @@ lockFlake(const Settings & settings, EvalState & state, const FlakeRef & topRef, inputAttrPath, inputFlake.path, false); + + warnRegistry(inputFlake.resolvedRef); } else { @@ -757,16 +791,21 @@ lockFlake(const Settings & settings, EvalState & state, const FlakeRef & topRef, auto cachedInput = state.inputCache->getAccessor( state.fetchSettings, *state.store, input.ref->input, useRegistriesInputs); + auto resolvedRef = + FlakeRef(std::move(cachedInput.resolvedInput), input.ref->subdir); auto lockedRef = FlakeRef(std::move(cachedInput.lockedInput), input.ref->subdir); + warnRegistry(resolvedRef); + return { - state.storePath( - state.mountInput(lockedRef.input, input.ref->input, cachedInput.accessor)), + state.storePath(state.mountInput( + lockedRef.input, input.ref->input, cachedInput.accessor, true, true)), lockedRef}; } }(); - auto childNode = make_ref(lockedRef, ref, false, overriddenParentPath); + auto childNode = + make_ref(lockedRef, ref, false, input.buildTime, overriddenParentPath); nodePaths.emplace(childNode, path); @@ -884,7 +923,7 @@ lockFlake(const Settings & settings, EvalState & state, const FlakeRef & topRef, repo, so we should re-read it. FIXME: we could also just clear the 'rev' field... 
*/ auto prevLockedRef = flake.lockedRef; - flake = getFlake(state, topRef, useRegistriesTop); + flake = getFlake(state, topRef, useRegistriesTop, lockFlags.requireLockable); if (lockFlags.commitLockFile && flake.lockedRef.input.getRev() && prevLockedRef.input.getRev() != flake.lockedRef.input.getRev()) @@ -908,6 +947,23 @@ lockFlake(const Settings & settings, EvalState & state, const FlakeRef & topRef, } } +LockedFlake +lockFlake(const Settings & settings, EvalState & state, const FlakeRef & topRef, const LockFlags & lockFlags) +{ + auto useRegistries = lockFlags.useRegistries.value_or(settings.useRegistries); + auto useRegistriesTop = useRegistries ? fetchers::UseRegistries::All : fetchers::UseRegistries::No; + + return lockFlake(settings, state, topRef, lockFlags, getFlake(state, topRef, useRegistriesTop, {}, false)); +} + +LockedFlake +lockFlake(const Settings & settings, EvalState & state, const SourcePath & flakeDir, const LockFlags & lockFlags) +{ + /* We need a fake flakeref to put in the `Flake` struct, but it's not used for anything. 
*/ + auto fakeRef = parseFlakeRef(state.fetchSettings, "flake:get-flake"); + return lockFlake(settings, state, fakeRef, lockFlags, readFlake(state, fakeRef, fakeRef, fakeRef, flakeDir, {})); +} + static ref makeInternalFS() { auto internalFS = make_ref(MemorySourceAccessor{}); @@ -931,8 +987,6 @@ static Value * requireInternalFile(EvalState & state, CanonPath path) void callFlake(EvalState & state, const LockedFlake & lockedFlake, Value & vRes) { - experimentalFeatureSettings.require(Xp::Flakes); - auto [lockFileStr, keyMap] = lockedFlake.lockFile.to_string(); auto overrides = state.buildBindings(lockedFlake.nodePaths.size()); @@ -969,10 +1023,7 @@ void callFlake(EvalState & state, const LockedFlake & lockedFlake, Value & vRes) auto vLocks = state.allocValue(); vLocks->mkString(lockFileStr, state.mem); - auto vFetchFinalTree = get(state.internalPrimOps, "fetchFinalTree"); - assert(vFetchFinalTree); - - Value * args[] = {vLocks, &vOverrides, *vFetchFinalTree}; + Value * args[] = {vLocks, &vOverrides}; state.callFunction(*vCallFlake, args, vRes, noPos); } @@ -1003,41 +1054,6 @@ std::optional LockedFlake::getFingerprint(Store & store, const fetc Flake::~Flake() {} -ref openEvalCache(EvalState & state, ref lockedFlake) -{ - auto fingerprint = state.settings.useEvalCache && state.settings.pureEval - ? lockedFlake->getFingerprint(*state.store, state.fetchSettings) - : std::nullopt; - auto rootLoader = [&state, lockedFlake]() { - /* For testing whether the evaluation cache is - complete. 
*/ - if (getEnv("NIX_ALLOW_EVAL").value_or("1") == "0") - throw Error("not everything is cached, but evaluation is not allowed"); - - auto vFlake = state.allocValue(); - callFlake(state, *lockedFlake, *vFlake); - - state.forceAttrs(*vFlake, noPos, "while parsing cached flake data"); - - auto aOutputs = vFlake->attrs()->get(state.symbols.create("outputs")); - assert(aOutputs); - - return aOutputs->value; - }; - - if (fingerprint) { - auto search = state.evalCaches.find(fingerprint.value()); - if (search == state.evalCaches.end()) { - search = state.evalCaches - .emplace(fingerprint.value(), make_ref(fingerprint, state, rootLoader)) - .first; - } - return search->second; - } else { - return make_ref(std::nullopt, state, rootLoader); - } -} - } // namespace flake } // namespace nix diff --git a/src/libflake/flakeref.cc b/src/libflake/flakeref.cc index c35040adb5d8..4fc97ac53a98 100644 --- a/src/libflake/flakeref.cc +++ b/src/libflake/flakeref.cc @@ -26,6 +26,7 @@ #include "nix/store/outputs-spec.hh" #include "nix/util/ref.hh" #include "nix/util/types.hh" +#include "nix/fetchers/fetch-settings.hh" namespace nix { class Store; @@ -42,12 +43,12 @@ const static std::string subDirElemRegex = "(?:[a-zA-Z0-9_-]+[a-zA-Z0-9._-]*)"; const static std::string subDirRegex = subDirElemRegex + "(?:/" + subDirElemRegex + ")*"; #endif -std::string FlakeRef::to_string() const +std::string FlakeRef::to_string(bool abbreviate) const { StringMap extraQuery; if (subdir != "") extraQuery.insert_or_assign("dir", subdir); - return input.toURLString(extraQuery); + return input.toURLString(extraQuery, abbreviate); } fetchers::Attrs FlakeRef::toAttrs() const @@ -90,7 +91,8 @@ static std::pair fromParsedURL(const fetchers::Settings & fetchSettings, ParsedURL && parsedURL, bool isFlake) { auto dir = getOr(parsedURL.query, "dir", ""); - parsedURL.query.erase("dir"); + if (!fetchSettings.nix219Compat) + parsedURL.query.erase("dir"); std::string fragment; std::swap(fragment, parsedURL.fragment); diff 
--git a/src/libflake/include/nix/flake/flake.hh b/src/libflake/include/nix/flake/flake.hh index 3bee6556f643..7156261ceb50 100644 --- a/src/libflake/include/nix/flake/flake.hh +++ b/src/libflake/include/nix/flake/flake.hh @@ -10,6 +10,7 @@ namespace nix { class EvalState; +struct Provenance; namespace flake { @@ -44,12 +45,18 @@ typedef std::map FlakeInputs; struct FlakeInput { std::optional ref; + /** - * true = process flake to get outputs - * - * false = (fetched) static source path + * Whether to call the `flake.nix` file in this input to get its outputs. */ bool isFlake = true; + + /** + * Whether to fetch this input at evaluation time or at build + * time. + */ + bool buildTime = false; + std::optional follows; FlakeInputs overrides; }; @@ -88,6 +95,11 @@ struct Flake */ SourcePath path; + /** + * Cached provenance of `flake.nix` (equivalent to `path.getProvenance()`). + */ + std::shared_ptr provenance; + /** * Pretend that `lockedRef` is dirty. */ @@ -116,7 +128,8 @@ struct Flake } }; -Flake getFlake(EvalState & state, const FlakeRef & flakeRef, fetchers::UseRegistries useRegistries); +Flake getFlake( + EvalState & state, const FlakeRef & flakeRef, fetchers::UseRegistries useRegistries, bool requireLockable = true); /** * Fingerprint of a locked flake; used as a cache key. @@ -212,17 +225,39 @@ struct LockFlags * for those inputs will be ignored. */ std::set inputUpdates; + + /** + * Whether to require a locked input. + */ + bool requireLockable = true; }; +/** + * Return a `Flake` object representing the flake read from the + * `flake.nix` file in `rootDir`. + */ +Flake readFlake( + EvalState & state, + const FlakeRef & originalRef, + const FlakeRef & resolvedRef, + const FlakeRef & lockedRef, + const SourcePath & rootDir, + const InputAttrPath & lockRootPath); + +/* + * Compute an in-memory lock file for the specified top-level flake, and optionally write it to file, if the flake is + * writable. 
+ */ LockedFlake lockFlake(const Settings & settings, EvalState & state, const FlakeRef & flakeRef, const LockFlags & lockFlags); -void callFlake(EvalState & state, const LockedFlake & lockedFlake, Value & v); +LockedFlake lockFlake( + const Settings & settings, EvalState & state, const FlakeRef & topRef, const LockFlags & lockFlags, Flake flake); -/** - * Open an evaluation cache for a flake. - */ -ref openEvalCache(EvalState & state, ref lockedFlake); +LockedFlake +lockFlake(const Settings & settings, EvalState & state, const SourcePath & flakeDir, const LockFlags & lockFlags); + +void callFlake(EvalState & state, const LockedFlake & lockedFlake, Value & v); } // namespace flake @@ -234,11 +269,4 @@ void emitTreeAttrs( bool emptyRevFallback = false, bool forceDirty = false); -/** - * An internal builtin similar to `fetchTree`, except that it - * always treats the input as final (i.e. no attributes can be - * added/removed/changed). - */ -void prim_fetchFinalTree(EvalState & state, const PosIdx pos, Value ** args, Value & v); - } // namespace nix diff --git a/src/libflake/include/nix/flake/flakeref.hh b/src/libflake/include/nix/flake/flakeref.hh index b557433a9fb8..1f39d62ebbf9 100644 --- a/src/libflake/include/nix/flake/flakeref.hh +++ b/src/libflake/include/nix/flake/flakeref.hh @@ -68,8 +68,7 @@ struct FlakeRef { } - // FIXME: change to operator <<. - std::string to_string() const; + std::string to_string(bool abbreviate = false) const; fetchers::Attrs toAttrs() const; diff --git a/src/libflake/include/nix/flake/lockfile.hh b/src/libflake/include/nix/flake/lockfile.hh index 89029aec491c..27232b20a669 100644 --- a/src/libflake/include/nix/flake/lockfile.hh +++ b/src/libflake/include/nix/flake/lockfile.hh @@ -111,6 +111,7 @@ struct LockedNode : Node { FlakeRef lockedRef, originalRef; bool isFlake = true; + bool buildTime = false; /* The node relative to which relative source paths (e.g. 'path:../foo') are interpreted. 
*/ @@ -120,10 +121,12 @@ struct LockedNode : Node const FlakeRef & lockedRef, const FlakeRef & originalRef, bool isFlake = true, + bool buildTime = false, std::optional parentInputAttrPath = {}) : lockedRef(std::move(lockedRef)) , originalRef(std::move(originalRef)) , isFlake(isFlake) + , buildTime(buildTime) , parentInputAttrPath(std::move(parentInputAttrPath)) { } diff --git a/src/libflake/include/nix/flake/meson.build b/src/libflake/include/nix/flake/meson.build index fc580164eaec..fbe54f41208b 100644 --- a/src/libflake/include/nix/flake/meson.build +++ b/src/libflake/include/nix/flake/meson.build @@ -6,6 +6,7 @@ headers = files( 'flake.hh', 'flakeref.hh', 'lockfile.hh', + 'provenance.hh', 'settings.hh', 'url-name.hh', ) diff --git a/src/libflake/include/nix/flake/provenance.hh b/src/libflake/include/nix/flake/provenance.hh new file mode 100644 index 000000000000..011744f5e65d --- /dev/null +++ b/src/libflake/include/nix/flake/provenance.hh @@ -0,0 +1,21 @@ +#pragma once + +#include "nix/util/provenance.hh" + +namespace nix { + +struct FlakeProvenance : Provenance +{ + std::shared_ptr next; + std::string flakeOutput; + bool pure = true; + + FlakeProvenance(std::shared_ptr next, std::string flakeOutput, bool pure) + : next(std::move(next)) + , flakeOutput(std::move(flakeOutput)) + , pure(pure) {}; + + nlohmann::json to_json() const override; +}; + +} // namespace nix diff --git a/src/libflake/include/nix/flake/settings.hh b/src/libflake/include/nix/flake/settings.hh index 7187a3294a34..05b36f5b779c 100644 --- a/src/libflake/include/nix/flake/settings.hh +++ b/src/libflake/include/nix/flake/settings.hh @@ -21,13 +21,7 @@ struct Settings : public Config void configureEvalSettings(nix::EvalSettings & evalSettings) const; Setting useRegistries{ - this, - true, - "use-registries", - "Whether to use flake registries to resolve flake references.", - {}, - true, - Xp::Flakes}; + this, true, "use-registries", "Whether to use flake registries to resolve flake references.", 
{}, true}; Setting acceptFlakeConfig{ this, @@ -35,8 +29,7 @@ struct Settings : public Config "accept-flake-config", "Whether to accept Nix configuration settings from a flake without prompting.", {}, - true, - Xp::Flakes}; + true}; Setting commitLockFileSummary{ this, @@ -47,8 +40,7 @@ struct Settings : public Config empty, the summary is generated based on the action performed. )", {"commit-lockfile-summary"}, - true, - Xp::Flakes}; + true}; }; } // namespace nix::flake diff --git a/src/libflake/lockfile.cc b/src/libflake/lockfile.cc index 4eeba3bd3ef1..dc5b79ffe510 100644 --- a/src/libflake/lockfile.cc +++ b/src/libflake/lockfile.cc @@ -71,6 +71,7 @@ LockedNode::LockedNode(const fetchers::Settings & fetchSettings, const nlohmann: : lockedRef(getFlakeRef(fetchSettings, json, "locked", "info")) // FIXME: remove "info" , originalRef(getFlakeRef(fetchSettings, json, "original", nullptr)) , isFlake(json.find("flake") != json.end() ? (bool) json["flake"] : true) + , buildTime(json.find("buildTime") != json.end() ? (bool) json["buildTime"] : false) , parentInputAttrPath( json.find("parent") != json.end() ? (std::optional) json["parent"] : std::nullopt) { @@ -229,13 +230,11 @@ std::pair LockFile::toJSON() const if (auto lockedNode = node.dynamic_pointer_cast()) { n["original"] = fetchers::attrsToJSON(lockedNode->originalRef.toAttrs()); n["locked"] = fetchers::attrsToJSON(lockedNode->lockedRef.toAttrs()); - /* For backward compatibility, omit the "__final" - attribute. We never allow non-final inputs in lock files - anyway. 
*/ assert(lockedNode->lockedRef.input.isFinal() || lockedNode->lockedRef.input.isRelative()); - n["locked"].erase("__final"); if (!lockedNode->isFlake) n["flake"] = false; + if (lockedNode->buildTime) + n["buildTime"] = true; if (lockedNode->parentInputAttrPath) n["parent"] = *lockedNode->parentInputAttrPath; } @@ -352,7 +351,7 @@ std::map LockFile::getAllInputs() const static std::string describe(const FlakeRef & flakeRef) { - auto s = fmt("'%s'", flakeRef.to_string()); + auto s = fmt("'%s'", flakeRef.to_string(true)); if (auto lastModified = flakeRef.input.getLastModified()) s += fmt(" (%s)", std::put_time(std::gmtime(&*lastModified), "%Y-%m-%d")); diff --git a/src/libflake/meson.build b/src/libflake/meson.build index 58916ecd9ab2..516ef7ff3383 100644 --- a/src/libflake/meson.build +++ b/src/libflake/meson.build @@ -45,6 +45,7 @@ sources = files( 'flake.cc', 'flakeref.cc', 'lockfile.cc', + 'provenance.cc', 'settings.cc', 'url-name.cc', ) @@ -62,7 +63,7 @@ this_library = library( dependencies : deps_public + deps_private + deps_other, include_directories : include_dirs, link_args : linker_export_flags, - prelink : true, # For C++ static initializers + prelink : prelink, # For C++ static initializers install : true, ) diff --git a/src/libflake/package.nix b/src/libflake/package.nix index dd442a44ec9a..2b0c827a09ce 100644 --- a/src/libflake/package.nix +++ b/src/libflake/package.nix @@ -18,7 +18,7 @@ let in mkMesonLibrary (finalAttrs: { - pname = "nix-flake"; + pname = "determinate-nix-flake"; inherit version; workDir = ./.; diff --git a/src/libflake/provenance.cc b/src/libflake/provenance.cc new file mode 100644 index 000000000000..c80c4154561a --- /dev/null +++ b/src/libflake/provenance.cc @@ -0,0 +1,28 @@ +#include "nix/flake/provenance.hh" +#include "nix/util/json-utils.hh" + +#include + +namespace nix { + +nlohmann::json FlakeProvenance::to_json() const +{ + return nlohmann::json{ + {"type", "flake"}, + {"next", next ? 
next->to_json() : nlohmann::json(nullptr)}, + {"flakeOutput", flakeOutput}, + {"pure", pure}}; +} + +Provenance::Register registerFlakeProvenance("flake", [](nlohmann::json json) { + auto & obj = getObject(json); + std::shared_ptr next; + if (auto p = optionalValueAt(obj, "next"); p && !p->is_null()) + next = Provenance::from_json(*p); + bool pure = true; + if (auto p = optionalValueAt(obj, "pure")) + pure = getBoolean(*p); + return make_ref(next, getString(valueAt(obj, "flakeOutput")), pure); +}); + +} // namespace nix diff --git a/src/libmain-c/meson.build b/src/libmain-c/meson.build index 36332fdb70a1..dd02c20a4c67 100644 --- a/src/libmain-c/meson.build +++ b/src/libmain-c/meson.build @@ -49,7 +49,7 @@ this_library = library( dependencies : deps_public + deps_private + deps_other, include_directories : include_dirs, link_args : linker_export_flags, - prelink : true, # For C++ static initializers + prelink : prelink, # For C++ static initializers install : true, ) diff --git a/src/libmain-c/package.nix b/src/libmain-c/package.nix index f019a917d360..17858d56f2e5 100644 --- a/src/libmain-c/package.nix +++ b/src/libmain-c/package.nix @@ -17,7 +17,7 @@ let in mkMesonLibrary (finalAttrs: { - pname = "nix-main-c"; + pname = "determinate-nix-main-c"; inherit version; workDir = ./.; diff --git a/src/libmain/include/nix/main/shared.hh b/src/libmain/include/nix/main/shared.hh index f9e771205ba2..19be9a04cf39 100644 --- a/src/libmain/include/nix/main/shared.hh +++ b/src/libmain/include/nix/main/shared.hh @@ -26,6 +26,8 @@ void parseCmdLine( const Strings & args, fun parseArg); +std::string version(); + void printVersion(const std::string & programName); /** diff --git a/src/libmain/meson.build b/src/libmain/meson.build index 2ac59924e592..d9d5e9362d5e 100644 --- a/src/libmain/meson.build +++ b/src/libmain/meson.build @@ -81,7 +81,7 @@ this_library = library( dependencies : deps_public + deps_private + deps_other, include_directories : include_dirs, link_args : 
linker_export_flags, - prelink : true, # For C++ static initializers + prelink : prelink, # For C++ static initializers install : true, ) diff --git a/src/libmain/package.nix b/src/libmain/package.nix index 7b0a4dee7dad..119e1f1aca59 100644 --- a/src/libmain/package.nix +++ b/src/libmain/package.nix @@ -18,7 +18,7 @@ let in mkMesonLibrary (finalAttrs: { - pname = "nix-main"; + pname = "determinate-nix-main"; inherit version; workDir = ./.; diff --git a/src/libmain/progress-bar.cc b/src/libmain/progress-bar.cc index a973102f9509..05fd89827869 100644 --- a/src/libmain/progress-bar.cc +++ b/src/libmain/progress-bar.cc @@ -51,6 +51,7 @@ class ProgressBar : public Logger ActivityId parent; std::optional name; std::chrono::time_point startTime; + bool logged = false; }; struct ActivitiesByType @@ -142,8 +143,14 @@ class ProgressBar : public Logger return; } - if (state->active) + if (state->active) { writeToStderr("\r\e[K"); + /* Show activities that were previously only shown on the + progress bar. Otherwise the user won't know what's + happening. 
*/ + for (auto & act : state->activities) + logActivity(*state, lvlNotice, act); + } } void resume() override @@ -196,6 +203,14 @@ class ProgressBar : public Logger } } + void logActivity(State & state, Verbosity lvl, ActInfo & act) + { + if (!act.logged && lvl <= verbosity && !act.s.empty() && act.type != actBuildWaiting) { + log(state, lvl, act.s + "..."); + act.logged = true; + } + } + void startActivity( ActivityId act, Verbosity lvl, @@ -206,15 +221,14 @@ class ProgressBar : public Logger { auto state(state_.lock()); - if (lvl <= verbosity && !s.empty() && type != actBuildWaiting) - log(*state, lvl, s + "..."); - state->activities.emplace_back( ActInfo{.s = s, .type = type, .parent = parent, .startTime = std::chrono::steady_clock::now()}); auto i = std::prev(state->activities.end()); state->its.emplace(act, i); state->activitiesByType[type].its.emplace(act, i); + logActivity(*state, lvl, *i); + if (type == actBuild) { std::string name(storePathToName(getS(fields, 0))); if (hasSuffix(name, ".drv")) @@ -456,11 +470,7 @@ class ProgressBar : public Logger } } - auto width = getWindowSize().second; - if (width <= 0) - width = std::numeric_limits::max(); - - redraw("\r" + filterANSIEscapes(line, false, width) + ANSI_NORMAL + "\e[K"); + redraw("\r" + filterANSIEscapes(line, false, getWindowWidth()) + ANSI_NORMAL + "\e[K"); return nextWakeup; } diff --git a/src/libmain/shared.cc b/src/libmain/shared.cc index 8a74484d451a..ef26c50a58fc 100644 --- a/src/libmain/shared.cc +++ b/src/libmain/shared.cc @@ -325,9 +325,14 @@ void parseCmdLine( LegacyArgs(programName, parseArg).parseCmdline(args); } +std::string version() +{ + return fmt("(Determinate Nix %s) %s", determinateNixVersion, nixVersion); +} + void printVersion(const std::string & programName) { - std::cout << fmt("%1% (Nix) %2%", programName, nixVersion) << std::endl; + std::cout << fmt("%s %s", programName, version()) << std::endl; if (verbosity > lvlInfo) { Strings cfg; #if NIX_USE_BOEHMGC diff --git 
a/src/libmain/unix/stack.cc b/src/libmain/unix/stack.cc index bec0d389f5ed..e049bae50de1 100644 --- a/src/libmain/unix/stack.cc +++ b/src/libmain/unix/stack.cc @@ -10,6 +10,8 @@ namespace nix { +static struct sigaction savedSigsegvAction; + static void sigsegvHandler(int signo, siginfo_t * info, void * ctx) { /* Detect stack overflows by comparing the faulting address with @@ -34,12 +36,8 @@ static void sigsegvHandler(int signo, siginfo_t * info, void * ctx) } } - /* Restore default behaviour (i.e. segfault and dump core). */ - struct sigaction act; - sigfillset(&act.sa_mask); - act.sa_handler = SIG_DFL; - act.sa_flags = 0; - if (sigaction(SIGSEGV, &act, 0)) + /* Restore the original SIGSEGV handler. */ + if (sigaction(SIGSEGV, &savedSigsegvAction, 0)) abort(); } @@ -63,7 +61,7 @@ void detectStackOverflow() sigfillset(&act.sa_mask); act.sa_sigaction = sigsegvHandler; act.sa_flags = SA_SIGINFO | SA_ONSTACK; - if (sigaction(SIGSEGV, &act, 0)) + if (sigaction(SIGSEGV, &act, &savedSigsegvAction)) throw SysError("resetting SIGSEGV"); #endif } diff --git a/src/libstore-c/meson.build b/src/libstore-c/meson.build index c81235bf16d4..fd5d4990efba 100644 --- a/src/libstore-c/meson.build +++ b/src/libstore-c/meson.build @@ -52,7 +52,7 @@ this_library = library( dependencies : deps_public + deps_private + deps_other, include_directories : include_dirs, link_args : linker_export_flags, - prelink : true, # For C++ static initializers + prelink : prelink, # For C++ static initializers install : true, ) diff --git a/src/libstore-c/nix_api_store.cc b/src/libstore-c/nix_api_store.cc index fbb3c418566c..f0d8aeab3aa1 100644 --- a/src/libstore-c/nix_api_store.cc +++ b/src/libstore-c/nix_api_store.cc @@ -289,6 +289,29 @@ nix_derivation * nix_derivation_from_json(nix_c_context * context, Store * store NIXC_CATCH_ERRS_NULL } +nix_err nix_derivation_make_outputs( + nix_c_context * context, + Store * store, + const char * json, + void (*callback)(void * userdata, const char * output_name, 
const char * path), + void * userdata) +{ + if (context) + context->last_err_code = NIX_OK; + try { + auto drv = nix::Derivation::parseJsonAndValidate(*store->ptr, nlohmann::json::parse(json)); + + for (auto & output : drv.outputs) { + auto outPath = output.second.path(*store->ptr, drv.name, output.first); + + if (callback && outPath) { + callback(userdata, output.first.c_str(), store->ptr->printStorePath(*outPath).c_str()); + } + } + } + NIXC_CATCH_ERRS +} + nix_err nix_derivation_to_json( nix_c_context * context, const nix_derivation * drv, nix_get_string_callback callback, void * userdata) { @@ -342,6 +365,96 @@ nix_derivation * nix_store_drv_from_store_path(nix_c_context * context, Store * NIXC_CATCH_ERRS_NULL } +nix_err nix_store_drv_from_path( + nix_c_context * context, + Store * store, + const StorePath * path, + void (*callback)(void * userdata, const nix_derivation * drv), + void * userdata) +{ + if (context) + context->last_err_code = NIX_OK; + try { + nix::Derivation drv = store->ptr->derivationFromPath(path->path); + if (callback) { + const nix_derivation tmp{drv, store}; + callback(userdata, &tmp); + } + } + NIXC_CATCH_ERRS +} + +nix_err nix_store_query_path_info( + nix_c_context * context, + Store * store, + const StorePath * store_path, + void * userdata, + nix_get_string_callback callback) +{ + if (context) + context->last_err_code = NIX_OK; + try { + auto info = store->ptr->queryPathInfo(store_path->path); + if (callback) { + auto result = info->toJSON(&store->ptr->config, true, nix::PathInfoJsonFormat::V1).dump(); + callback(result.data(), result.size(), userdata); + } + } + NIXC_CATCH_ERRS +} + +nix_err nix_store_build_paths( + nix_c_context * context, + Store * store, + const StorePath ** store_paths, + unsigned int num_store_paths, + void (*callback)(void * userdata, const char * path, const char * result), + void * userdata) +{ + if (context) + context->last_err_code = NIX_OK; + try { + std::vector derived_paths; + for (size_t i = 0; i < 
num_store_paths; i++) { + const StorePath * store_path = store_paths[i]; + derived_paths.push_back(nix::SingleDerivedPath::Opaque{store_path->path}); + } + + auto results = store->ptr->buildPathsWithResults(derived_paths); + for (auto & result : results) { + if (callback) + callback( + userdata, result.path.to_string(store->ptr->config).c_str(), nlohmann::json(result).dump().c_str()); + } + } + NIXC_CATCH_ERRS +} + +nix_err nix_derivation_get_outputs_and_optpaths( + nix_c_context * context, + const nix_derivation * drv, + const Store * store, + void (*callback)(void * userdata, const char * name, const StorePath * path), + void * userdata) +{ + if (context) + context->last_err_code = NIX_OK; + try { + auto value = drv->drv.outputsAndOptPaths(store->ptr->config); + if (callback) { + for (const auto & [name, result] : value) { + if (auto store_path = result.second) { + const StorePath tmp_path{*store_path}; + callback(userdata, name.c_str(), &tmp_path); + } else { + callback(userdata, name.c_str(), nullptr); + } + } + } + } + NIXC_CATCH_ERRS +} + StorePath * nix_store_query_path_from_hash_part(nix_c_context * context, Store * store, const char * hash) { if (context) diff --git a/src/libstore-c/nix_api_store.h b/src/libstore-c/nix_api_store.h index e60a7d6c0f7c..964c4066154b 100644 --- a/src/libstore-c/nix_api_store.h +++ b/src/libstore-c/nix_api_store.h @@ -201,6 +201,22 @@ nix_store_get_version(nix_c_context * context, Store * store, nix_get_string_cal */ nix_derivation * nix_derivation_from_json(nix_c_context * context, Store * store, const char * json); +/** + * @brief Hashes the derivation and gives the output paths + * + * @param[in] context Optional, stores error information. + * @param[in] store nix store reference. + * @param[in] json JSON of the derivation as a string. + * @param[in] callback Called for every output to provide the output path. + * @param[in] userdata User data to pass to the callback. 
+ */ +nix_err nix_derivation_make_outputs( + nix_c_context * context, + Store * store, + const char * json, + void (*callback)(void * userdata, const char * output_name, const char * path), + void * userdata); + /** * @brief Add the given `nix_derivation` to the given store * @@ -259,6 +275,88 @@ nix_err nix_store_get_fs_closure( */ nix_derivation * nix_store_drv_from_store_path(nix_c_context * context, Store * store, const StorePath * path); +/** + * @note The callback borrows the Derivation only for the duration of the call. + * + * @param[out] context Optional, stores error information + * @param[in] store The nix store + * @param[in] path The nix store path + * @param[in] callback The callback to call + * @param[in] userdata The userdata to pass to the callback + */ +nix_err nix_store_drv_from_path( + nix_c_context * context, + Store * store, + const StorePath * path, + void (*callback)(void * userdata, const nix_derivation * drv), + void * userdata); + +/** + * @brief Queries for the nix store path info. + * + * @param[out] context Optional, stores error information + * @param[in] store nix store reference + * @param[in] store_path A store path + * @param[in] userdata The data to pass to the callback + * @param[in] callback Called for when the path info is resolved + */ +nix_err nix_store_query_path_info( + nix_c_context * context, + Store * store, + const StorePath * store_path, + void * userdata, + nix_get_string_callback callback); + +/** + * @brief Builds the paths, if they are a derivation then they get built. + * + * @note Path and result for the callback only exist for the lifetime of + * the call. Result is a string containing the build result in JSON. 
+ * + * @param[out] context Optional, stores error information + * @param[in] store nix store reference + * @param[in] store_paths Pointer to list of nix store paths + * @param[in] num_store_paths Number of nix store paths + * @param[in] callback The callback to trigger for build results + * @param[in] userdata User data to pass to the callback + */ +nix_err nix_store_build_paths( + nix_c_context * context, + Store * store, + const StorePath ** store_paths, + unsigned int num_store_paths, + void (*callback)(void * userdata, const char * path, const char * result), + void * userdata); + +/** + * @brief Iterate and get all of the store paths for each output. + * + * @note The callback borrows the StorePath only for the duration of the call. + * + * @param[out] context Optional, stores error information + * @param[in] drv The derivation + * @param[in] store The nix store + * @param[in] callback The function to call on every output and store path + * @param[in] userdata The userdata to pass to the callback + */ +nix_err nix_derivation_get_outputs_and_optpaths( + nix_c_context * context, + const nix_derivation * drv, + const Store * store, + void (*callback)(void * userdata, const char * name, const StorePath * path), + void * userdata); + +/** + * @brief Gets the derivation as a JSON string + * + * @param[out] context Optional, stores error information + * @param[in] drv The derivation + * @param[in] callback Called with the JSON string + * @param[in] userdata Arbitrary data passed to the callback + */ +nix_err nix_derivation_to_json( + nix_c_context * context, const nix_derivation * drv, nix_get_string_callback callback, void * userdata); + /** * @brief Query the full store path given the hash part of a valid store * path, or empty if no matching path is found. 
diff --git a/src/libstore-c/nix_api_store_internal.h b/src/libstore-c/nix_api_store_internal.h index 712d96488a57..0199628da8a9 100644 --- a/src/libstore-c/nix_api_store_internal.h +++ b/src/libstore-c/nix_api_store_internal.h @@ -18,6 +18,7 @@ struct StorePath struct nix_derivation { nix::Derivation drv; + Store * store; }; } // extern "C" diff --git a/src/libstore-c/package.nix b/src/libstore-c/package.nix index fde17c78e017..0ce37e44c012 100644 --- a/src/libstore-c/package.nix +++ b/src/libstore-c/package.nix @@ -15,7 +15,7 @@ let in mkMesonLibrary (finalAttrs: { - pname = "nix-store-c"; + pname = "determinate-nix-store-c"; inherit version; workDir = ./.; diff --git a/src/libstore-test-support/meson.build b/src/libstore-test-support/meson.build index 4d904cb1d06a..c508eeaecc70 100644 --- a/src/libstore-test-support/meson.build +++ b/src/libstore-test-support/meson.build @@ -56,7 +56,7 @@ this_library = library( # TODO: Remove `-lrapidcheck` when https://github.com/emil-e/rapidcheck/pull/326 # is available. 
See also ../libutil/build.meson link_args : linker_export_flags + [ '-lrapidcheck' ], - prelink : true, # For C++ static initializers + prelink : prelink, # For C++ static initializers install : true, ) diff --git a/src/libstore-test-support/package.nix b/src/libstore-test-support/package.nix index 4f20f2cbe716..bd5a4c0aa7a0 100644 --- a/src/libstore-test-support/package.nix +++ b/src/libstore-test-support/package.nix @@ -19,7 +19,7 @@ let in mkMesonLibrary (finalAttrs: { - pname = "nix-store-test-support"; + pname = "determinate-nix-store-test-support"; inherit version; workDir = ./.; diff --git a/src/libstore-tests/nix_api_store.cc b/src/libstore-tests/nix_api_store.cc index 9bfbf8453a9e..f35d6ddf36f3 100644 --- a/src/libstore-tests/nix_api_store.cc +++ b/src/libstore-tests/nix_api_store.cc @@ -956,4 +956,55 @@ TEST_F(nix_api_store_test, nix_derivation_clone) nix_derivation_free(drv2); } +TEST_F(nix_api_store_test, nix_store_build_paths) +{ + nix::experimentalFeatureSettings.set("extra-experimental-features", "ca-derivations"); + nix::settings.getWorkerSettings().substituters = {}; + + auto * store = open_local_store(); + + std::filesystem::path unitTestData{getenv("_NIX_TEST_UNIT_DATA")}; + std::ifstream t{unitTestData / "derivation/ca/self-contained.json"}; + std::stringstream buffer; + buffer << t.rdbuf(); + + // Replace the hardcoded system with the current system + std::string jsonStr = nix::replaceStrings(buffer.str(), "x86_64-linux", nix::settings.thisSystem.get()); + + auto * drv = nix_derivation_from_json(ctx, store, jsonStr.c_str()); + assert_ctx_ok(); + ASSERT_NE(drv, nullptr); + + auto * drvPath = nix_add_derivation(ctx, store, drv); + assert_ctx_ok(); + ASSERT_NE(drv, nullptr); + + // Realise the derivation - capture the order outputs are returned + std::map outputs; + std::vector output_order; + auto cb = LambdaAdapter{.fun = [&](const char * path, const char * result) { + ASSERT_NE(path, nullptr); + ASSERT_NE(result, nullptr); + 
output_order.push_back(path); + outputs.emplace(path, result); + }}; + + std::vector paths = {drvPath}; + + auto ret = nix_store_build_paths( + ctx, + store, + const_cast(paths.data()), + paths.size(), + decltype(cb)::call_void, + static_cast(&cb)); + assert_ctx_ok(); + ASSERT_EQ(ret, NIX_OK); + ASSERT_EQ(outputs.size(), 1); + + nix_store_path_free(drvPath); + nix_derivation_free(drv); + nix_store_free(store); +} + } // namespace nixC diff --git a/src/libstore/active-builds.cc b/src/libstore/active-builds.cc new file mode 100644 index 000000000000..d165b6d7d4eb --- /dev/null +++ b/src/libstore/active-builds.cc @@ -0,0 +1,152 @@ +#include "nix/store/active-builds.hh" +#include "nix/util/json-utils.hh" + +#include + +#ifndef _WIN32 +# include +#endif + +namespace nix { + +UserInfo UserInfo::fromUid(uid_t uid) +{ + UserInfo info; + info.uid = uid; + +#ifndef _WIN32 + // Look up the user name for the UID (thread-safe) + struct passwd pwd; + struct passwd * result; + std::vector buf(16384); + if (getpwuid_r(uid, &pwd, buf.data(), buf.size(), &result) == 0 && result) + info.name = result->pw_name; +#endif + + return info; +} + +} // namespace nix + +namespace nlohmann { + +using namespace nix; + +UserInfo adl_serializer::from_json(const json & j) +{ + return UserInfo{ + .uid = j.at("uid").get(), + .name = j.contains("name") && !j.at("name").is_null() + ? std::optional(j.at("name").get()) + : std::nullopt, + }; +} + +void adl_serializer::to_json(json & j, const UserInfo & info) +{ + j = nlohmann::json{ + {"uid", info.uid}, + {"name", info.name}, + }; +} + +// Durations are serialized as floats representing seconds. +static std::optional parseDuration(const json & j, const char * key) +{ + if (j.contains(key) && !j.at(key).is_null()) + return std::chrono::duration_cast( + std::chrono::duration(j.at(key).get())); + else + return std::nullopt; +} + +static nlohmann::json printDuration(const std::optional & duration) +{ + return duration + ? 
nlohmann::json( + std::chrono::duration_cast>(*duration) + .count()) + : nullptr; +} + +ActiveBuildInfo::ProcessInfo adl_serializer::from_json(const json & j) +{ + return ActiveBuildInfo::ProcessInfo{ + .pid = j.at("pid").get(), + .parentPid = j.at("parentPid").get(), + .user = j.at("user").get(), + .argv = j.at("argv").get>(), + .utime = parseDuration(j, "utime"), + .stime = parseDuration(j, "stime"), + .cutime = parseDuration(j, "cutime"), + .cstime = parseDuration(j, "cstime"), + }; +} + +void adl_serializer::to_json(json & j, const ActiveBuildInfo::ProcessInfo & process) +{ + j = nlohmann::json{ + {"pid", process.pid}, + {"parentPid", process.parentPid}, + {"user", process.user}, + {"argv", process.argv}, + {"utime", printDuration(process.utime)}, + {"stime", printDuration(process.stime)}, + {"cutime", printDuration(process.cutime)}, + {"cstime", printDuration(process.cstime)}, + }; +} + +ActiveBuild adl_serializer::from_json(const json & j) +{ + auto type = j.at("type").get(); + if (type != "build") + throw Error("invalid active build JSON: expected type 'build' but got '%s'", type); + std::optional cgroup; + if (!j.at("cgroup").is_null()) + cgroup = j.at("cgroup").get(); + return ActiveBuild{ + .nixPid = j.at("nixPid").get(), + .clientPid = j.at("clientPid").get>(), + .clientUid = j.at("clientUid").get>(), + .mainPid = j.at("mainPid").get(), + .mainUser = j.at("mainUser").get(), + .cgroup = std::move(cgroup), + .startTime = (time_t) j.at("startTime").get(), + .derivation = StorePath{getString(j.at("derivation"))}, + }; +} + +void adl_serializer::to_json(json & j, const ActiveBuild & build) +{ + j = nlohmann::json{ + {"type", "build"}, + {"nixPid", build.nixPid}, + {"clientPid", build.clientPid}, + {"clientUid", build.clientUid}, + {"mainPid", build.mainPid}, + {"mainUser", build.mainUser}, + {"cgroup", build.cgroup ? 
nlohmann::json(*build.cgroup) : nlohmann::json(nullptr)}, + {"startTime", (double) build.startTime}, + {"derivation", build.derivation.to_string()}, + }; +} + +ActiveBuildInfo adl_serializer::from_json(const json & j) +{ + ActiveBuildInfo info(adl_serializer::from_json(j)); + info.processes = j.at("processes").get>(); + info.utime = parseDuration(j, "utime"); + info.stime = parseDuration(j, "stime"); + return info; +} + +void adl_serializer::to_json(json & j, const ActiveBuildInfo & build) +{ + adl_serializer::to_json(j, build); + j["processes"] = build.processes; + j["utime"] = printDuration(build.utime); + j["stime"] = printDuration(build.stime); +} + +} // namespace nlohmann diff --git a/src/libstore/async-path-writer.cc b/src/libstore/async-path-writer.cc new file mode 100644 index 000000000000..ede52a146aaf --- /dev/null +++ b/src/libstore/async-path-writer.cc @@ -0,0 +1,180 @@ +#include "nix/store/async-path-writer.hh" +#include "nix/util/archive.hh" +#include "nix/util/provenance.hh" + +#include +#include + +namespace nix { + +struct AsyncPathWriterImpl : AsyncPathWriter +{ + ref store; + + struct Item + { + StorePath storePath; + std::string contents; + std::string name; + Hash hash; + StorePathSet references; + RepairFlag repair; + std::shared_ptr provenance; + std::promise promise; + }; + + struct State + { + std::vector items; + std::unordered_map> futures; + bool quit = false; + }; + + Sync state_; + + std::thread workerThread; + + std::condition_variable wakeupCV; + + AsyncPathWriterImpl(ref store) + : store(store) + { + workerThread = std::thread([&]() { + while (true) { + std::vector items; + + { + auto state(state_.lock()); + while (!state->quit && state->items.empty()) + state.wait(wakeupCV); + if (state->items.empty() && state->quit) + return; + std::swap(items, state->items); + } + + try { + writePaths(items); + for (auto & item : items) + item.promise.set_value(); + } catch (...) 
{ + for (auto & item : items) + item.promise.set_exception(std::current_exception()); + } + } + }); + } + + virtual ~AsyncPathWriterImpl() + { + state_.lock()->quit = true; + wakeupCV.notify_all(); + workerThread.join(); + } + + StorePath addPath( + std::string contents, + std::string name, + StorePathSet references, + RepairFlag repair, + std::shared_ptr provenance) override + { + auto hash = hashString(HashAlgorithm::SHA256, contents); + + auto storePath = store->makeFixedOutputPathFromCA( + name, + TextInfo{ + .hash = hash, + .references = references, + }); + + auto state(state_.lock()); + std::promise promise; + state->futures.insert_or_assign(storePath, promise.get_future()); + state->items.push_back( + Item{ + .storePath = storePath, + .contents = std::move(contents), + .name = std::move(name), + .hash = hash, + .references = std::move(references), + .repair = repair, + .provenance = provenance, + .promise = std::move(promise), + }); + wakeupCV.notify_all(); + + return storePath; + } + + void waitForPath(const StorePath & path) override + { + auto future = ({ + auto state = state_.lock(); + auto i = state->futures.find(path); + if (i == state->futures.end()) + return; + i->second; + }); + future.get(); + } + + void waitForAllPaths() override + { + auto futures = ({ + auto state(state_.lock()); + std::move(state->futures); + }); + for (auto & future : futures) + future.second.get(); + } + + void writePaths(const std::vector & items) + { +// FIXME: addMultipeToStore() shouldn't require a NAR hash. 
+#if 0 + Store::PathsSource sources; + RepairFlag repair = NoRepair; + + for (auto & item : items) { + ValidPathInfo info{item.storePath, Hash(HashAlgorithm::SHA256)}; + info.references = item.references; + info.ca = ContentAddress { + .method = ContentAddressMethod::Raw::Text, + .hash = item.hash, + }; + if (item.repair) repair = item.repair; + auto source = sinkToSource([&](Sink & sink) + { + dumpString(item.contents, sink); + }); + sources.push_back({std::move(info), std::move(source)}); + } + + Activity act(*logger, lvlDebug, actUnknown, fmt("adding %d paths to the store", items.size())); + + store->addMultipleToStore(std::move(sources), act, repair); +#endif + + for (auto & item : items) { + StringSource source(item.contents); + store->addTempRoot(item.storePath); + auto storePath = store->addToStoreFromDump( + source, + item.storePath.name(), + FileSerialisationMethod::Flat, + ContentAddressMethod::Raw::Text, + HashAlgorithm::SHA256, + item.references, + item.repair, + item.provenance); + assert(storePath == item.storePath); + } + } +}; + +ref AsyncPathWriter::make(ref store) +{ + return make_ref(store); +} + +} // namespace nix diff --git a/src/libstore/binary-cache-store.cc b/src/libstore/binary-cache-store.cc index 31fe2c6173da..f95d8a866194 100644 --- a/src/libstore/binary-cache-store.cc +++ b/src/libstore/binary-cache-store.cc @@ -315,7 +315,8 @@ StorePath BinaryCacheStore::addToStoreFromDump( ContentAddressMethod hashMethod, HashAlgorithm hashAlgo, const StorePathSet & references, - RepairFlag repair) + RepairFlag repair, + std::shared_ptr provenance) { std::optional caHash; std::string nar; @@ -378,6 +379,7 @@ StorePath BinaryCacheStore::addToStoreFromDump( }), nar.hash); info.narSize = nar.numBytesDigested; + info.provenance = provenance; return info; }) ->path; @@ -507,6 +509,7 @@ StorePath BinaryCacheStore::addToStore( }), nar.hash); info.narSize = nar.numBytesDigested; + info.provenance = path.getProvenance(); return info; }) ->path; diff --git 
a/src/libstore/build-result.cc b/src/libstore/build-result.cc index f01911bcd002..9b854f18cbad 100644 --- a/src/libstore/build-result.cc +++ b/src/libstore/build-result.cc @@ -1,5 +1,7 @@ #include "nix/store/build-result.hh" #include "nix/util/json-utils.hh" +#include "nix/util/provenance.hh" + #include namespace nix { @@ -90,7 +92,7 @@ static BuildResult::Success::Status successStatusFromString(std::string_view str throw Error("unknown built result success status '%s'", str); } -static constexpr std::array, 12> failureStatusStrings{{ +static constexpr std::array, 13> failureStatusStrings{{ #define ENUM_ENTRY(e) {BuildResult::Failure::e, #e} ENUM_ENTRY(PermanentFailure), ENUM_ENTRY(InputRejected), @@ -104,6 +106,7 @@ static constexpr std::array::to_json(json & res, const BuildResult & br) res["success"] = true; res["status"] = successStatusToString(success.status); res["builtOutputs"] = success.builtOutputs; + if (success.provenance) + res["provenance"] = success.provenance->to_json(); }, [&](const BuildResult::Failure & failure) { res["success"] = false; res["status"] = failureStatusToString(failure.status); res["errorMsg"] = failure.message(); res["isNonDeterministic"] = failure.isNonDeterministic; + if (failure.provenance) + res["provenance"] = failure.provenance->to_json(); }, }, br.inner); @@ -201,17 +208,24 @@ BuildResult adl_serializer::from_json(const json & _json) bool success = getBoolean(valueAt(json, "success")); std::string statusStr = getString(valueAt(json, "status")); + auto provenanceFromJson = [](const nlohmann::json * j) -> std::shared_ptr { + if (j && !j->is_null()) + return Provenance::from_json(*j); + return nullptr; + }; + if (success) { BuildResult::Success s; s.status = successStatusFromString(statusStr); s.builtOutputs = valueAt(json, "builtOutputs"); + s.provenance = provenanceFromJson(optionalValueAt(json, "provenance")); br.inner = std::move(s); } else { - br.inner = BuildResult::Failure{{ - .status = failureStatusFromString(statusStr), 
- .msg = HintFmt(getString(valueAt(json, "errorMsg"))), - .isNonDeterministic = getBoolean(valueAt(json, "isNonDeterministic")), - }}; + br.inner = BuildResult::Failure{ + {.status = failureStatusFromString(statusStr), + .msg = HintFmt(getString(valueAt(json, "errorMsg"))), + .isNonDeterministic = getBoolean(valueAt(json, "isNonDeterministic")), + .provenance = provenanceFromJson(optionalValueAt(json, "provenance"))}}; } return br; diff --git a/src/libstore/build/build-log.cc b/src/libstore/build/build-log.cc index a8fb64fc68bf..85b8877d6590 100644 --- a/src/libstore/build/build-log.cc +++ b/src/libstore/build/build-log.cc @@ -2,7 +2,7 @@ namespace nix { -BuildLog::BuildLog(size_t maxTailLines, std::unique_ptr act) +BuildLog::BuildLog(size_t maxTailLines, ref act) : maxTailLines(maxTailLines) , act(std::move(act)) { diff --git a/src/libstore/build/derivation-building-goal.cc b/src/libstore/build/derivation-building-goal.cc index 60ca5492c168..685624bb6790 100644 --- a/src/libstore/build/derivation-building-goal.cc +++ b/src/libstore/build/derivation-building-goal.cc @@ -348,8 +348,8 @@ Goal::Co DerivationBuildingGoal::tryToBuild(StorePathSet inputPaths) WrongLocalStore wrongStore; - if (drv->platform != settings.thisSystem.get() && !settings.extraPlatforms.get().count(drv->platform) - && !drv->isBuiltin()) + if (drv->platform != settings.thisSystem.get() && drv->platform != "wasm32-wasip1" + && !settings.extraPlatforms.get().count(drv->platform) && !drv->isBuiltin()) wrongStore.badPlatform = WrongLocalStore::Pair{drv->platform, settings.thisSystem.get()}; { @@ -634,7 +634,7 @@ Goal::Co DerivationBuildingGoal::buildWithHook( std::unique_ptr buildLog = std::make_unique( worker.settings.logLines, - std::make_unique( + make_ref( *logger, lvlInfo, actBuild, @@ -787,6 +787,13 @@ Goal::Co DerivationBuildingGoal::buildLocally( #ifdef _WIN32 // TODO enable `DerivationBuilder` on Windows throw UnimplementedError("building derivations is not yet implemented on Windows"); 
#else + auto msg = + fmt(buildMode == bmRepair ? "repairing outputs of '%s'" + : buildMode == bmCheck ? "checking outputs of '%s'" + : "building '%s'", + worker.store.printStorePath(drvPath)); + auto act = make_ref( + *logger, lvlInfo, actBuild, msg, Logger::Fields{worker.store.printStorePath(drvPath), "", 1, 1}); std::unique_ptr buildLog; std::unique_ptr logFile; @@ -797,15 +804,7 @@ Goal::Co DerivationBuildingGoal::buildLocally( auto closeLogFile = [&]() { logFile.reset(); }; auto started = [&]() { - auto msg = - fmt(buildMode == bmRepair ? "repairing outputs of '%s'" - : buildMode == bmCheck ? "checking outputs of '%s'" - : "building '%s'", - worker.store.printStorePath(drvPath)); - buildLog = std::make_unique( - worker.settings.logLines, - std::make_unique( - *logger, lvlInfo, actBuild, msg, Logger::Fields{worker.store.printStorePath(drvPath), "", 1, 1})); + buildLog = std::make_unique(worker.settings.logLines, act); mcRunningBuilds = std::make_unique>(worker.runningBuilds); worker.updateProgress(); }; @@ -814,6 +813,11 @@ Goal::Co DerivationBuildingGoal::buildLocally( DerivationBuilderUnique builder; Descriptor builderOut; + /* Get the provenance of the derivation, if available. 
*/ + std::shared_ptr provenance; + if (auto info = worker.evalStore.maybeQueryPathInfo(drvPath)) + provenance = info->provenance; + // Will continue here while waiting for a build user below while (true) { @@ -891,6 +895,7 @@ Goal::Co DerivationBuildingGoal::buildLocally( DerivationBuilderParams params{ .drvPath = drvPath, + .drvProvenance = provenance, .buildResult = buildResult, .drv = *drv, .drvOptions = drvOptions, @@ -900,6 +905,7 @@ Goal::Co DerivationBuildingGoal::buildLocally( .defaultPathsInChroot = std::move(defaultPathsInChroot), .systemFeatures = worker.store.config.systemFeatures.get(), .desugaredEnv = std::move(desugaredEnv), + .act = act, }; /* If we have to wait and retry (see below), then `builder` will @@ -1000,7 +1006,7 @@ Goal::Co DerivationBuildingGoal::buildLocally( (unlinked) lock files. */ outputLocks.setDeletion(true); outputLocks.unlock(); - co_return doneSuccess(BuildResult::Success::Built, std::move(builtOutputs)); + co_return doneSuccess(BuildResult::Success::Built, std::move(builtOutputs), provenance); } #endif } @@ -1094,7 +1100,7 @@ BuildError DerivationBuildingGoal::fixupBuilderFailureErrorMessage(BuilderFailur msg += line; msg += "\n"; } - auto nixLogCommand = experimentalFeatureSettings.isEnabled(Xp::NixCommand) ? "nix log" : "nix-store -l"; + auto nixLogCommand = "nix log"; // The command is on a separate line for easy copying, such as with triple click. // This message will be indented elsewhere, so removing the indentation before the // command will not put it at the start of the line unfortunately. 
@@ -1319,7 +1325,8 @@ DerivationBuildingGoal::checkPathValidity(std::map & return {allValid, validOutputs}; } -Goal::Done DerivationBuildingGoal::doneSuccess(BuildResult::Success::Status status, SingleDrvOutputs builtOutputs) +Goal::Done DerivationBuildingGoal::doneSuccess( + BuildResult::Success::Status status, SingleDrvOutputs builtOutputs, std::shared_ptr provenance) { mcRunningBuilds.reset(); @@ -1328,11 +1335,21 @@ Goal::Done DerivationBuildingGoal::doneSuccess(BuildResult::Success::Status stat worker.updateProgress(); - return Goal::doneSuccess( + auto res = Goal::doneSuccess( BuildResult::Success{ .status = status, .builtOutputs = std::move(builtOutputs), + .provenance = provenance, }); + + logger->result( + getCurActivity(), + resBuildResult, + nlohmann::json(KeyedBuildResult( + buildResult, + DerivedPath::Built{.drvPath = makeConstantStorePathRef(drvPath), .outputs = OutputsSpec::All{}}))); + + return res; } Goal::Done DerivationBuildingGoal::doneFailure(BuildError ex) @@ -1345,6 +1362,12 @@ Goal::Done DerivationBuildingGoal::doneFailure(BuildError ex) worker.updateProgress(); + logger->result( + getCurActivity(), + resBuildResult, + nlohmann::json(KeyedBuildResult( + {ex}, DerivedPath::Built{.drvPath = makeConstantStorePathRef(drvPath), .outputs = OutputsSpec::All{}}))); + return Goal::doneFailure(ecFailed, std::move(ex)); } diff --git a/src/libstore/build/derivation-check.cc b/src/libstore/build/derivation-check.cc index c422897e624e..d132b24649ec 100644 --- a/src/libstore/build/derivation-check.cc +++ b/src/libstore/build/derivation-check.cc @@ -12,7 +12,8 @@ void checkOutputs( const StorePath & drvPath, const decltype(Derivation::outputs) & drvOutputs, const decltype(DerivationOptions::outputChecks) & outputChecks, - const std::map & outputs) + const std::map & outputs, + Activity & act) { std::map outputsByPath; for (auto & output : outputs) @@ -36,6 +37,13 @@ void checkOutputs( if (wanted != got) { /* Throw an error after registering the path as 
valid. */ + act.result( + resHashMismatch, + { + {"storePath", store.printStorePath(drvPath)}, + {"wanted", wanted}, + {"got", got}, + }); throw BuildError( BuildResult::Failure::HashMismatch, "hash mismatch in fixed-output derivation '%s':\n specified: %s\n got: %s", diff --git a/src/libstore/build/derivation-check.hh b/src/libstore/build/derivation-check.hh index 01e6c5d56383..ee2d01229524 100644 --- a/src/libstore/build/derivation-check.hh +++ b/src/libstore/build/derivation-check.hh @@ -22,6 +22,7 @@ void checkOutputs( const StorePath & drvPath, const decltype(Derivation::outputs) & drvOutputs, const decltype(DerivationOptions::outputChecks) & drvOptions, - const std::map & outputs); + const std::map & outputs, + Activity & act); } // namespace nix diff --git a/src/libstore/build/derivation-goal.cc b/src/libstore/build/derivation-goal.cc index 4f99928d7850..2877314979fa 100644 --- a/src/libstore/build/derivation-goal.cc +++ b/src/libstore/build/derivation-goal.cc @@ -472,7 +472,7 @@ Goal::Done DerivationGoal::doneSuccess(BuildResult::Success::Status status, Unke worker.updateProgress(); - return Goal::doneSuccess( + auto res = Goal::doneSuccess( BuildResult::Success{ .status = status, .builtOutputs = {{ @@ -486,6 +486,15 @@ Goal::Done DerivationGoal::doneSuccess(BuildResult::Success::Status status, Unke }, }}, }); + + logger->result( + getCurActivity(), + resBuildResult, + nlohmann::json(KeyedBuildResult( + buildResult, + DerivedPath::Built{.drvPath = makeConstantStorePathRef(drvPath), .outputs = OutputsSpec::All{}}))); + + return res; } Goal::Done DerivationGoal::doneFailure(BuildError ex) @@ -498,6 +507,12 @@ Goal::Done DerivationGoal::doneFailure(BuildError ex) worker.updateProgress(); + logger->result( + getCurActivity(), + resBuildResult, + nlohmann::json(KeyedBuildResult( + {ex}, DerivedPath::Built{.drvPath = makeConstantStorePathRef(drvPath), .outputs = OutputsSpec::All{}}))); + return Goal::doneFailure(ecFailed, std::move(ex)); } diff --git 
a/src/libstore/build/entry-points.cc b/src/libstore/build/entry-points.cc index e6d1f62d786a..50d84969a373 100644 --- a/src/libstore/build/entry-points.cc +++ b/src/libstore/build/entry-points.cc @@ -65,11 +65,6 @@ std::vector Store::buildPathsWithResults( results.reserve(state.size()); for (auto & [req, goalPtr] : state) { - /* Goals that were never started or were cancelled have exitCode - ecBusy and a default buildResult with empty errorMsg. Skip them - to avoid reporting spurious failures with empty messages. */ - if (goalPtr->exitCode == Goal::ecBusy) - continue; results.emplace_back( KeyedBuildResult{ goalPtr->buildResult, diff --git a/src/libstore/build/substitution-goal.cc b/src/libstore/build/substitution-goal.cc index d58bec8851d5..d1279d5271a2 100644 --- a/src/libstore/build/substitution-goal.cc +++ b/src/libstore/build/substitution-goal.cc @@ -7,6 +7,8 @@ #include +#include + namespace nix { PathSubstitutionGoal::PathSubstitutionGoal( @@ -26,6 +28,14 @@ PathSubstitutionGoal::~PathSubstitutionGoal() cleanup(); } +Goal::Done PathSubstitutionGoal::doneFailure(ExitCode result, BuildResult::Failure failure) +{ + logger->result( + getCurActivity(), resBuildResult, nlohmann::json(KeyedBuildResult({failure}, DerivedPath::Opaque{storePath}))); + + return Goal::doneFailure(result, std::move(failure)); +} + Goal::Co PathSubstitutionGoal::init() { trace("init"); @@ -210,7 +220,7 @@ Goal::Co PathSubstitutionGoal::tryToRun( outPipe.createAsyncPipe(worker.ioport.get()); #endif - auto promise = std::promise(); + auto promise = std::promise>(); thr = std::thread([this, &promise, &subPath, &sub]() { try { @@ -225,9 +235,8 @@ Goal::Co PathSubstitutionGoal::tryToRun( Logger::Fields{worker.store.printStorePath(storePath), sub->config.getHumanReadableURI()}); PushActivity pact(act.id); - copyStorePath(*sub, worker.store, subPath, repair, sub->config.isTrusted ? 
NoCheckSigs : CheckSigs); - - promise.set_value(); + promise.set_value( + copyStorePath(*sub, worker.store, subPath, repair, sub->config.isTrusted ? NoCheckSigs : CheckSigs)); } catch (...) { promise.set_exception(std::current_exception()); } @@ -261,8 +270,12 @@ Goal::Co PathSubstitutionGoal::tryToRun( thr.join(); worker.childTerminated(this); + std::shared_ptr provenance; + try { - promise.get_future().get(); + auto info = promise.get_future().get(); + if (info) + provenance = info->provenance; } catch (std::exception & e) { /* Cause the parent build to fail unless --fallback is given, or the substitute has disappeared. The latter case behaves @@ -303,7 +316,12 @@ Goal::Co PathSubstitutionGoal::tryToRun( worker.updateProgress(); - co_return doneSuccess(BuildResult::Success{.status = BuildResult::Success::Substituted}); + auto success = BuildResult::Success{.status = BuildResult::Success::Substituted, .provenance = provenance}; + + logger->result( + getCurActivity(), resBuildResult, nlohmann::json(KeyedBuildResult({success}, DerivedPath::Opaque{storePath}))); + + co_return doneSuccess(std::move(success)); } void PathSubstitutionGoal::cleanup() diff --git a/src/libstore/common-protocol.cc b/src/libstore/common-protocol.cc index a0afe948d649..6e3a1921dc04 100644 --- a/src/libstore/common-protocol.cc +++ b/src/libstore/common-protocol.cc @@ -27,13 +27,13 @@ void CommonProto::Serialise::write( StorePath CommonProto::Serialise::read(const StoreDirConfig & store, CommonProto::ReadConn conn) { - return store.parseStorePath(readString(conn.from)); + return conn.shortStorePaths ? StorePath(readString(conn.from)) : store.parseStorePath(readString(conn.from)); } void CommonProto::Serialise::write( const StoreDirConfig & store, CommonProto::WriteConn conn, const StorePath & storePath) { - conn.to << store.printStorePath(storePath); + conn.to << (conn.shortStorePaths ? 
storePath.to_string() : store.printStorePath(storePath)); } ContentAddress CommonProto::Serialise::read(const StoreDirConfig & store, CommonProto::ReadConn conn) @@ -79,13 +79,15 @@ std::optional CommonProto::Serialise>::read(const StoreDirConfig & store, CommonProto::ReadConn conn) { auto s = readString(conn.from); - return s == "" ? std::optional{} : store.parseStorePath(s); + return s == "" ? std::optional{} : conn.shortStorePaths ? StorePath(s) : store.parseStorePath(s); } void CommonProto::Serialise>::write( const StoreDirConfig & store, CommonProto::WriteConn conn, const std::optional & storePathOpt) { - conn.to << (storePathOpt ? store.printStorePath(*storePathOpt) : ""); + conn.to + << (storePathOpt ? (conn.shortStorePaths ? storePathOpt->to_string() : store.printStorePath(*storePathOpt)) + : ""); } std::optional @@ -134,6 +136,8 @@ constexpr static BuildResultStatus buildResultStatusTable[] = { BuildResultFailureStatus::NotDeterministic, // 12 BuildResultSuccessStatus::ResolvesToAlreadyValid, // 13 BuildResultFailureStatus::NoSubstituters, // 14 + BuildResultFailureStatus::HashMismatch, // 15 + BuildResultFailureStatus::Cancelled, // 16 }; BuildResultStatus diff --git a/src/libstore/daemon.cc b/src/libstore/daemon.cc index 0e6668bc0b23..31a82b40421d 100644 --- a/src/libstore/daemon.cc +++ b/src/libstore/daemon.cc @@ -19,6 +19,8 @@ #include "nix/util/git.hh" #include "nix/util/logging.hh" #include "nix/store/globals.hh" +#include "nix/store/active-builds.hh" +#include "nix/util/provenance.hh" #ifndef _WIN32 // TODO need graceful async exit support on Windows? # include "nix/util/monitor-fd.hh" @@ -423,6 +425,9 @@ static void performOp( bool repairBool; conn.from >> repairBool; auto repair = RepairFlag{repairBool}; + auto provenance = conn.protoVersion.features.contains(WorkerProto::featureProvenance) + ? 
Provenance::from_json_str_optional(readString(conn.from)) + : nullptr; logger->startWork(); auto pathInfo = [&]() { @@ -448,8 +453,8 @@ static void performOp( assert(false); } // TODO these two steps are essentially RemoteStore::addCAToStore. Move it up to Store. - auto path = - store->addToStoreFromDump(source, name, dumpMethod, contentAddressMethod, hashAlgo, refs, repair); + auto path = store->addToStoreFromDump( + source, name, dumpMethod, contentAddressMethod, hashAlgo, refs, repair, provenance); return store->queryPathInfo(path); }(); logger->stopWork(); @@ -510,7 +515,21 @@ static void performOp( logger->startWork(); { FramedSource source(conn.from); - store->addMultipleToStore(source, RepairFlag{repair}, dontCheckSigs ? NoCheckSigs : CheckSigs); + auto expected = readNum(source); + for (uint64_t i = 0; i < expected; ++i) { + auto info = WorkerProto::Serialise::read( + *store, + WorkerProto::ReadConn{ + .from = source, + .version = conn.protoVersion.features.contains(WorkerProto::featureVersionedAddToStoreMultiple) + ? conn.protoVersion + : WorkerProto::Version{.number = {.major = 1, .minor = 16}}, + }); + info.ultimate = false; + EnsureRead wrapper{source, info.narSize}; + store->addToStore(info, wrapper, RepairFlag{repair}, dontCheckSigs ? 
NoCheckSigs : CheckSigs); + wrapper.finish(); + } } logger->stopWork(); break; @@ -737,6 +756,7 @@ static void performOp( options.action = WorkerProto::Serialise::read(*store, rconn); options.pathsToDelete = WorkerProto::Serialise::read(*store, rconn); conn.from >> options.ignoreLiveness >> options.maxFreed; + options.censor = !trusted; // obsolete fields readInt(conn.from); readInt(conn.from); @@ -745,7 +765,7 @@ static void performOp( GCResults results; logger->startWork(); - if (options.ignoreLiveness) + if (options.ignoreLiveness && !getEnv("_NIX_IN_TEST").has_value()) throw Error("you are not allowed to ignore liveness"); auto & gcStore = require(*store); gcStore.collectGarbage(options, results); @@ -905,6 +925,9 @@ static void performOp( conn.from >> info.registrationTime >> info.narSize >> info.ultimate; info.sigs = WorkerProto::Serialise>::read(*store, rconn); info.ca = ContentAddress::parseOpt(readString(conn.from)); + info.provenance = conn.protoVersion.features.contains(WorkerProto::featureProvenance) + ? Provenance::from_json_str_optional(readString(conn.from)) + : nullptr; conn.from >> repair >> dontCheckSigs; if (!trusted && dontCheckSigs) dontCheckSigs = false; @@ -934,8 +957,9 @@ static void performOp( logger->startWork(); - // FIXME: race if addToStore doesn't read source? - store->addToStore(info, *source, (RepairFlag) repair, dontCheckSigs ? NoCheckSigs : CheckSigs); + EnsureRead wrapper{*source, info.narSize}; + store->addToStore(info, wrapper, (RepairFlag) repair, dontCheckSigs ? 
NoCheckSigs : CheckSigs); + wrapper.finish(); logger->stopWork(); } @@ -1005,6 +1029,15 @@ static void performOp( break; } + case WorkerProto::Op::QueryActiveBuilds: { + logger->startWork(); + auto & activeBuildsStore = require(*store); + auto activeBuilds = activeBuildsStore.queryActiveBuilds(); + logger->stopWork(); + conn.to << nlohmann::json(activeBuilds).dump(); + break; + } + default: throw Error("invalid operation %1%", op); } @@ -1030,7 +1063,10 @@ void processConnection(ref store, FdSource && from, FdSink && to, Trusted /* Exchange the greeting. */ WorkerProto::BasicServerConnection conn; - conn.protoVersion = WorkerProto::BasicServerConnection::handshake(to, from, WorkerProto::latest); + auto version = WorkerProto::latest; + if (!experimentalFeatureSettings.isEnabled(Xp::Provenance)) + version.features.erase(std::string(WorkerProto::featureProvenance)); + conn.protoVersion = WorkerProto::BasicServerConnection::handshake(to, from, version); if (conn.protoVersion.number < WorkerProto::minimum.number) throw Error("the Nix client version is too old"); diff --git a/src/libstore/derivations.cc b/src/libstore/derivations.cc index ab969a089f57..719f238549e3 100644 --- a/src/libstore/derivations.cc +++ b/src/libstore/derivations.cc @@ -9,6 +9,7 @@ #include "nix/store/common-protocol-impl.hh" #include "nix/util/strings-inline.hh" #include "nix/util/json-utils.hh" +#include "nix/store/async-path-writer.hh" #include #include @@ -132,7 +133,8 @@ StorePath computeStorePath(const StoreDirConfig & store, const Derivation & drv) return path; } -StorePath Store::writeDerivation(const Derivation & drv, RepairFlag repair) +StorePath +Store::writeDerivation(const Derivation & drv, RepairFlag repair, std::shared_ptr provenance) { auto [suffix, contents, references, path] = infoForDerivation(*this, drv); @@ -153,12 +155,26 @@ StorePath Store::writeDerivation(const Derivation & drv, RepairFlag repair) ContentAddressMethod::Raw::Text, HashAlgorithm::SHA256, references, - repair); 
+ repair, + provenance); assert(path2 == path); return path; } +StorePath Store::writeDerivation( + AsyncPathWriter & asyncPathWriter, + const Derivation & drv, + RepairFlag repair, + std::shared_ptr provenance) +{ + auto references = drv.inputSrcs; + for (auto & i : drv.inputDrvs.map) + references.insert(i.first); + return asyncPathWriter.addPath( + drv.unparse(*this, false), std::string(drv.name) + drvExtension, references, repair, provenance); +} + namespace { /** * This mimics std::istream to some extent. We use this much smaller implementation @@ -1524,7 +1540,7 @@ adl_serializer::from_json(const json & _json, const Experiment } } -void adl_serializer::to_json(json & res, const Derivation & d) +void adl_serializer::to_json(json & res, const BasicDerivation & d) { res = nlohmann::json::object(); @@ -1550,24 +1566,6 @@ void adl_serializer::to_json(json & res, const Derivation & d) for (auto & input : d.inputSrcs) inputsList.emplace_back(input); } - - auto doInput = [&](this const auto & doInput, const auto & inputNode) -> nlohmann::json { - auto value = nlohmann::json::object(); - value["outputs"] = inputNode.value; - { - auto next = nlohmann::json::object(); - for (auto & [outputId, childNode] : inputNode.childMap) - next[outputId] = doInput(childNode); - value["dynamicOutputs"] = std::move(next); - } - return value; - }; - - auto & inputDrvsObj = inputsObj["drvs"]; - inputDrvsObj = nlohmann::json::object(); - for (auto & [inputDrv, inputNode] : d.inputDrvs.map) { - inputDrvsObj[inputDrv.to_string()] = doInput(inputNode); - } } res["system"] = d.platform; @@ -1579,6 +1577,28 @@ void adl_serializer::to_json(json & res, const Derivation & d) res["structuredAttrs"] = d.structuredAttrs->structuredAttrs; } +void adl_serializer::to_json(json & res, const Derivation & d) +{ + adl_serializer::to_json(res, static_cast(d)); + + auto doInput = [&](this const auto & doInput, const auto & inputNode) -> nlohmann::json { + auto value = nlohmann::json::object(); + 
value["outputs"] = inputNode.value; + { + auto next = nlohmann::json::object(); + for (auto & [outputId, childNode] : inputNode.childMap) + next[outputId] = doInput(childNode); + value["dynamicOutputs"] = std::move(next); + } + return value; + }; + + auto & inputDrvsObj = res["inputs"]["drvs"]; + inputDrvsObj = nlohmann::json::object(); + for (auto & [inputDrv, inputNode] : d.inputDrvs.map) + inputDrvsObj[inputDrv.to_string()] = doInput(inputNode); +} + Derivation adl_serializer::from_json(const json & _json, const ExperimentalFeatureSettings & xpSettings) { using nlohmann::detail::value_t; diff --git a/src/libstore/dummy-store.cc b/src/libstore/dummy-store.cc index 943873119ca5..dde799ba528c 100644 --- a/src/libstore/dummy-store.cc +++ b/src/libstore/dummy-store.cc @@ -215,7 +215,9 @@ struct DummyStoreImpl : DummyStore if (info.path.isDerivation()) { warn("back compat supporting `addToStore` for inserting derivations in dummy store"); writeDerivation( - parseDerivation(*this, accessor->readFile(CanonPath::root), Derivation::nameFromPath(info.path))); + parseDerivation(*this, accessor->readFile(CanonPath::root), Derivation::nameFromPath(info.path)), + repair, + info.provenance); return; } @@ -232,11 +234,12 @@ struct DummyStoreImpl : DummyStore StorePath addToStoreFromDump( Source & source, std::string_view name, - FileSerialisationMethod dumpMethod = FileSerialisationMethod::NixArchive, - ContentAddressMethod hashMethod = FileIngestionMethod::NixArchive, - HashAlgorithm hashAlgo = HashAlgorithm::SHA256, - const StorePathSet & references = StorePathSet(), - RepairFlag repair = NoRepair) override + FileSerialisationMethod dumpMethod, + ContentAddressMethod hashMethod, + HashAlgorithm hashAlgo, + const StorePathSet & references, + RepairFlag repair, + std::shared_ptr provenance) override { if (isDerivation(name)) throw Error("Do not insert derivation into dummy store with `addToStoreFromDump`"); @@ -284,6 +287,7 @@ struct DummyStoreImpl : DummyStore 
std::move(narHash.first)); info.narSize = narHash.second.value(); + info.provenance = provenance; auto path = info.path; auto accessor = make_ref(std::move(*temp)); @@ -299,7 +303,8 @@ struct DummyStoreImpl : DummyStore return path; } - StorePath writeDerivation(const Derivation & drv, RepairFlag repair = NoRepair) override + StorePath + writeDerivation(const Derivation & drv, RepairFlag repair, std::shared_ptr provenance) override { auto drvPath = nix::computeStorePath(*this, drv); @@ -307,6 +312,7 @@ struct DummyStoreImpl : DummyStore if (config->readOnly) unsupported("writeDerivation"); derivations.insert({drvPath, drv}); + // FIXME: record provenance } return drvPath; diff --git a/src/libstore/export-import.cc b/src/libstore/export-import.cc index b1c61626c8c7..4dd1598442c4 100644 --- a/src/libstore/export-import.cc +++ b/src/libstore/export-import.cc @@ -4,92 +4,171 @@ #include "nix/util/archive.hh" #include "nix/store/common-protocol.hh" #include "nix/store/common-protocol-impl.hh" - -#include +#include "nix/store/worker-protocol.hh" namespace nix { -static void exportPath(Store & store, const StorePath & path, Sink & sink) -{ - auto info = store.queryPathInfo(path); - - HashSink hashSink(HashAlgorithm::SHA256); - TeeSink teeSink(sink, hashSink); - - store.narFromPath(path, teeSink); - - /* Refuse to export paths that have changed. This prevents - filesystem corruption from spreading to other machines. - Don't complain if the stored hash is zero (unknown). */ - Hash hash = hashSink.currentHash().hash; - if (hash != info->narHash && info->narHash != Hash(info->narHash.algo)) - throw Error( - "hash of path '%s' has changed from '%s' to '%s'!", - store.printStorePath(path), - info->narHash.to_string(HashFormat::Nix32, true), - hash.to_string(HashFormat::Nix32, true)); - - teeSink << exportMagic << store.printStorePath(path); - CommonProto::write(store, CommonProto::WriteConn{.to = teeSink}, info->references); - teeSink << (info->deriver ? 
store.printStorePath(*info->deriver) : "") << 0; -} +static const uint32_t exportMagicV1 = 0x4558494e; +static const uint64_t exportMagicV2 = 0x324f4952414e; // = 'NARIO2' -void exportPaths(Store & store, const StorePathSet & paths, Sink & sink) +static WorkerProto::Version exportProtoVersion{ + .number = + { + .major = 1, + .minor = 16, + }, +}; + +void exportPaths(Store & store, const StorePathSet & paths, Sink & sink, unsigned int version) { auto sorted = store.topoSortPaths(paths); std::reverse(sorted.begin(), sorted.end()); - for (auto & path : sorted) { - sink << 1; - exportPath(store, path, sink); + auto dumpNar = [&](const ValidPathInfo & info) { + HashSink hashSink(HashAlgorithm::SHA256); + TeeSink teeSink(sink, hashSink); + + store.narFromPath(info.path, teeSink); + + /* Refuse to export paths that have changed. This prevents + filesystem corruption from spreading to other machines. + Don't complain if the stored hash is zero (unknown). */ + Hash hash = hashSink.currentHash().hash; + if (hash != info.narHash && info.narHash != Hash(info.narHash.algo)) + throw Error( + "hash of path '%s' has changed from '%s' to '%s'!", + store.printStorePath(info.path), + info.narHash.to_string(HashFormat::Nix32, true), + hash.to_string(HashFormat::Nix32, true)); + }; + + switch (version) { + + case 1: + for (auto & path : sorted) { + sink << 1; + auto info = store.queryPathInfo(path); + dumpNar(*info); + sink << exportMagicV1 << store.printStorePath(path); + CommonProto::write(store, CommonProto::WriteConn{.to = sink}, info->references); + sink << (info->deriver ? store.printStorePath(*info->deriver) : "") << 0; + } + sink << 0; + break; + + case 2: + sink << exportMagicV2; + + for (auto & path : sorted) { + Activity act(*logger, lvlTalkative, actUnknown, fmt("exporting path '%s'", store.printStorePath(path))); + sink << 1; + auto info = store.queryPathInfo(path); + // FIXME: move to CommonProto? 
+ WorkerProto::Serialise::write( + store, + WorkerProto::WriteConn{.to = sink, .version = exportProtoVersion, .shortStorePaths = true}, + *info); + dumpNar(*info); + } + + sink << 0; + break; + + default: + throw Error("unsupported nario version %d", version); } - - sink << 0; } StorePaths importPaths(Store & store, Source & source, CheckSigsFlag checkSigs) { StorePaths res; - while (true) { - auto n = readNum(source); - if (n == 0) - break; - if (n != 1) - throw Error("input doesn't look like something created by 'nix-store --export'"); - - /* Extract the NAR from the source. */ + + auto version = readNum(source); + + /* Note: nario version 1 lacks an explicit header. The first + integer denotes whether a store path follows or not. So look + for 0 or 1. */ + switch (version) { + + case 0: + /* Empty version 1 nario, nothing to do. */ + break; + + case 1: { + /* Reuse a string buffer to avoid kernel overhead allocating + memory for large strings. */ StringSink saved; - TeeSource tee{source, saved}; - NullFileSystemObjectSink ether; - parseDump(ether, tee); - uint32_t magic = readInt(source); - if (magic != exportMagic) - throw Error("Nix archive cannot be imported; wrong format"); + /* Non-empty version 1 nario. */ + while (true) { + /* Extract the NAR from the source. */ + saved.s.clear(); + TeeSource tee{source, saved}; + NullFileSystemObjectSink ether; + parseDump(ether, tee); + + uint32_t magic = readInt(source); + if (magic != exportMagicV1) + throw Error("nario cannot be imported; wrong format"); + + auto path = store.parseStorePath(readString(source)); + + auto references = CommonProto::Serialise::read(store, CommonProto::ReadConn{.from = source}); + auto deriver = readString(source); + + // Ignore optional legacy signature. 
+ if (readInt(source) == 1) + readString(source); + + if (!store.isValidPath(path)) { + auto narHash = hashString(HashAlgorithm::SHA256, saved.s); + + ValidPathInfo info{path, {store, narHash}}; + if (deriver != "") + info.deriver = store.parseStorePath(deriver); + info.references = references; + info.narSize = saved.s.size(); + + // Can't use underlying source, which would have been exhausted. + auto source2 = StringSource(saved.s); + store.addToStore(info, source2, NoRepair, checkSigs); + } + + res.push_back(path); + + auto n = readNum(source); + if (n == 0) + break; + if (n != 1) + throw Error("input doesn't look like a nario"); + } + break; + } - auto path = store.parseStorePath(readString(source)); + case exportMagicV2: + while (true) { + auto n = readNum(source); + if (n == 0) + break; + if (n != 1) + throw Error("input doesn't look like a nario"); - // Activity act(*logger, lvlInfo, "importing path '%s'", info.path); + auto info = WorkerProto::Serialise::read( + store, WorkerProto::ReadConn{.from = source, .version = exportProtoVersion, .shortStorePaths = true}); - auto references = CommonProto::Serialise::read(store, CommonProto::ReadConn{.from = source}); - auto deriver = readString(source); - auto narHash = hashString(HashAlgorithm::SHA256, saved.s); + Activity act( + *logger, lvlTalkative, actUnknown, fmt("importing path '%s'", store.printStorePath(info.path))); - ValidPathInfo info{path, {store, narHash}}; - if (deriver != "") - info.deriver = store.parseStorePath(deriver); - info.references = references; - info.narSize = saved.s.size(); + EnsureRead wrapper{source, info.narSize}; + store.addToStore(info, wrapper, NoRepair, checkSigs); - // Ignore optional legacy signature. 
- if (readInt(source) == 1) - readString(source); + res.push_back(info.path); + } - // Can't use underlying source, which would have been exhausted - auto source = StringSource(saved.s); - store.addToStore(info, source, NoRepair, checkSigs); + break; - res.push_back(info.path); + default: + throw Error("input doesn't look like a nario"); } return res; diff --git a/src/libstore/filetransfer.cc b/src/libstore/filetransfer.cc index ca0bdb46b8df..b72b838e20bb 100644 --- a/src/libstore/filetransfer.cc +++ b/src/libstore/filetransfer.cc @@ -499,7 +499,7 @@ struct curlFileTransfer : public FileTransfer curl_easy_setopt( req, CURLOPT_USERAGENT, - ("curl/" LIBCURL_VERSION " Nix/" + nixVersion + ("curl/" LIBCURL_VERSION " Nix/" + nixVersion + " DeterminateNix/" + determinateNixVersion + (fileTransfer.settings.userAgentSuffix != "" ? " " + fileTransfer.settings.userAgentSuffix.get() : "")) .c_str()); @@ -865,8 +865,13 @@ struct curlFileTransfer : public FileTransfer void workerThreadMain() { + /* NOTE(cole-h): the maxQueueSize needs to be >0 or else things will hang */ + assert(maxQueueSize > 0); + /* Cause this thread to be notified on SIGINT. */ -#ifndef _WIN32 // TODO need graceful async exit support on Windows? +#if !defined(_WIN32) && !defined(IS_STATIC) // TODO need graceful async exit support on Windows? + // FIXME(RossComputerGuy): this causes issues on static builds. + // In particular, it causes a segfault to happen at the end of the program running. 
auto callback = createInterruptCallback([&]() { stopWorkerThread(); }); #endif @@ -1050,14 +1055,24 @@ ref makeCurlFileTransfer(const FileTransferSettings & settings return make_ref(settings); } +static Sync> _fileTransfer; + ref getFileTransfer() { - static ref fileTransfer = makeCurlFileTransfer(); + auto fileTransfer(_fileTransfer.lock()); - if (fileTransfer->state_.lock()->isQuitting()) - fileTransfer = makeCurlFileTransfer(); + if (!*fileTransfer || (*fileTransfer)->state_.lock()->isQuitting()) + *fileTransfer = makeCurlFileTransfer().get_ptr(); - return fileTransfer; + return ref(*fileTransfer); +} + +std::shared_ptr resetFileTransfer() +{ + auto fileTransfer(_fileTransfer.lock()); + std::shared_ptr prev; + fileTransfer->swap(prev); + return prev; } ref makeFileTransfer(const FileTransferSettings & settings) diff --git a/src/libstore/gc.cc b/src/libstore/gc.cc index 16b81abf2821..f41944a92ce0 100644 --- a/src/libstore/gc.cc +++ b/src/libstore/gc.cc @@ -204,7 +204,7 @@ void LocalStore::findTempRoots(Roots & tempRoots, bool censor) while ((end = contents.find((char) 0, pos)) != std::string::npos) { auto root = std::string_view(contents).substr(pos, end - pos); debug("got temporary root '%s'", root); - tempRoots[parseStorePath(root)].emplace(censor ? censored : fmt("{temp:%d}", pid)); + tempRoots[parseStorePath(root)].emplace(censor ? censored : fmt("{nix-process:%d}", pid)); pos = end + 1; } } @@ -358,13 +358,14 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results) bool keepOutputs = gcSettings.keepOutputs; bool keepDerivations = gcSettings.keepDerivations; - boost::unordered_flat_set> roots, dead, alive; + Roots roots; + boost::unordered_flat_set> dead, alive; struct Shared { // The temp roots only store the hash part to make it easier to // ignore suffixes like '.lock', '.chroot' and '.check'. 
- boost::unordered_flat_set> tempRoots; + boost::unordered_flat_map tempRoots; // Hash part of the store path currently being deleted, if // any. @@ -476,7 +477,8 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results) debug("got new GC root '%s'", path); auto hashPart = storePath->hashPart(); auto shared(_shared.lock()); - shared->tempRoots.emplace(hashPart); + // FIXME: could get the PID from the socket. + shared->tempRoots.insert_or_assign(std::string(hashPart), "{nix-process:unknown}"); /* If this path is currently being deleted, then we have to wait until deletion is finished to ensure that @@ -516,20 +518,16 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results) /* Find the roots. Since we've grabbed the GC lock, the set of permanent roots cannot increase now. */ printInfo("finding garbage collector roots..."); - Roots rootMap; if (!options.ignoreLiveness) - findRootsNoTemp(rootMap, true); - - for (auto & i : rootMap) - roots.insert(i.first); + findRootsNoTemp(roots, options.censor); /* Read the temporary roots created before we acquired the global GC root. Any new roots will be sent to our socket. */ - Roots tempRoots; - findTempRoots(tempRoots, true); - for (auto & root : tempRoots) { - _shared.lock()->tempRoots.emplace(root.first.hashPart()); - roots.insert(root.first); + { + Roots tempRoots; + findTempRoots(tempRoots, options.censor); + for (auto & root : tempRoots) + _shared.lock()->tempRoots.insert_or_assign(std::string(root.first.hashPart()), *root.second.begin()); } /* Synchronisation point for testing, see tests/functional/gc-non-blocking.sh. 
*/ @@ -627,20 +625,32 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results) } }; + if (options.action == GCOptions::gcDeleteSpecific && !options.pathsToDelete.count(*path)) { + throw Error( + "Cannot delete path '%s' because it's referenced by path '%s'.", + printStorePath(start), + printStorePath(*path)); + } + /* If this is a root, bail out. */ - if (roots.count(*path)) { + if (auto i = roots.find(*path); i != roots.end()) { + if (options.action == GCOptions::gcDeleteSpecific) + throw Error( + "Cannot delete path '%s' because it's referenced by the GC root '%s'.", + printStorePath(start), + *i->second.begin()); debug("cannot delete '%s' because it's a root", printStorePath(*path)); return markAlive(); } - if (options.action == GCOptions::gcDeleteSpecific && !options.pathsToDelete.count(*path)) - return; - - { + static bool inTest = getEnv("_NIX_IN_TEST").has_value(); + if (!(inTest && options.ignoreLiveness)) { auto hashPart = path->hashPart(); auto shared(_shared.lock()); - if (shared->tempRoots.count(hashPart)) { - debug("cannot delete '%s' because it's a temporary root", printStorePath(*path)); + if (auto i = shared->tempRoots.find(std::string(hashPart)); i != shared->tempRoots.end()) { + if (options.action == GCOptions::gcDeleteSpecific) + throw Error( + "Cannot delete path '%s' because it's in use by '%s'.", printStorePath(start), i->second); return markAlive(); } shared->pending = hashPart; @@ -699,12 +709,7 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results) for (auto & i : options.pathsToDelete) { deleteReferrersClosure(i); - if (!dead.count(i)) - throw Error( - "Cannot delete path '%1%' since it is still alive. 
" - "To find out why, use: " - "nix-store --query --roots and nix-store --query --referrers", - printStorePath(i)); + assert(dead.count(i)); } } else if (options.maxFreed > 0) { diff --git a/src/libstore/globals.cc b/src/libstore/globals.cc index 2e262aec8a0a..cc793db89ef0 100644 --- a/src/libstore/globals.cc +++ b/src/libstore/globals.cc @@ -20,6 +20,8 @@ #include #include +#include + #ifndef _WIN32 # include #endif @@ -288,6 +290,20 @@ const ExternalBuilder * LocalSettings::findExternalDerivationBuilderIfSupported( return nullptr; } +std::optional WorkerSettings::getHostName() +{ + if (hostName != "") + return hostName; + +#ifndef _WIN32 + char hostname[_POSIX_HOST_NAME_MAX + 1]; + if (gethostname(hostname, sizeof(hostname)) == 0) + return std::string(hostname); +#endif + + return std::nullopt; +} + ProfileDirsOptions Settings::getProfileDirsOptions() const { return { @@ -298,6 +314,8 @@ ProfileDirsOptions Settings::getProfileDirsOptions() const std::string nixVersion = PACKAGE_VERSION; +const std::string determinateNixVersion = DETERMINATE_NIX_VERSION; + NLOHMANN_JSON_SERIALIZE_ENUM( SandboxMode, { @@ -375,6 +393,27 @@ void from_json(const nlohmann::json & j, ChrootPath & cp) cp.optional = j.at("optional").get(); } +static nlohmann::json pathsInChrootToJSON(const PathsInChroot & paths) +{ + auto j = nlohmann::json::object(); + for (auto & [target, chrootPath] : paths) { + nlohmann::json cp; + to_json(cp, chrootPath); + j[target.string()] = std::move(cp); + } + return j; +} + +template<> +std::map BaseSetting::toJSONObject() const +{ + auto obj = AbstractSetting::toJSONObject(); + obj.emplace("value", pathsInChrootToJSON(value)); + obj.emplace("defaultValue", pathsInChrootToJSON(defaultValue)); + obj.emplace("documentDefault", documentDefault); + return obj; +} + template<> PathsInChroot BaseSetting::parse(const std::string & str) const { @@ -444,6 +483,24 @@ std::string BaseSetting::to_string() const return nlohmann::json(value).dump(); } +template +T 
JSONSetting::parse(const std::string & str) const +{ + try { + return nlohmann::json::parse(str).template get(); + } catch (std::exception & e) { + throw UsageError("parsing setting '%s': %s", BaseSetting::name, e.what()); + } +} + +template +std::string JSONSetting::to_string() const +{ + return nlohmann::json(BaseSetting::get()).dump(); +} + +template class JSONSetting; + template<> void BaseSetting::appendOrSet(PathsInChroot newValue, bool append) { diff --git a/src/libstore/include/nix/store/active-builds.hh b/src/libstore/include/nix/store/active-builds.hh new file mode 100644 index 000000000000..2dc914f35606 --- /dev/null +++ b/src/libstore/include/nix/store/active-builds.hh @@ -0,0 +1,112 @@ +#pragma once + +#include "nix/util/util.hh" +#include "nix/util/json-impls.hh" +#include "nix/store/path.hh" + +#include +#include + +namespace nix { + +/** + * A uid and optional corresponding user name. + */ +struct UserInfo +{ + uid_t uid = -1; + std::optional name; + + /** + * Create a UserInfo from a UID, looking up the username if possible. + */ + static UserInfo fromUid(uid_t uid); +}; + +struct ActiveBuild +{ + pid_t nixPid; + + std::optional clientPid; + std::optional clientUid; + + pid_t mainPid; + UserInfo mainUser; + std::optional cgroup; + + time_t startTime; + + StorePath derivation; +}; + +struct ActiveBuildInfo : ActiveBuild +{ + struct ProcessInfo + { + pid_t pid = 0; + pid_t parentPid = 0; + UserInfo user; + std::vector argv; + std::optional utime, stime, cutime, cstime; + }; + + // User/system CPU time for the entire cgroup, if available. 
+ std::optional utime, stime; + + std::vector processes; +}; + +struct TrackActiveBuildsStore +{ + struct BuildHandle + { + TrackActiveBuildsStore & tracker; + uint64_t id; + + BuildHandle(TrackActiveBuildsStore & tracker, uint64_t id) + : tracker(tracker) + , id(id) + { + } + + BuildHandle(BuildHandle && other) noexcept + : tracker(other.tracker) + , id(other.id) + { + other.id = 0; + } + + ~BuildHandle() + { + if (id) { + try { + tracker.buildFinished(*this); + } catch (...) { + ignoreExceptionInDestructor(); + } + } + } + }; + + virtual ~TrackActiveBuildsStore() = default; + + virtual BuildHandle buildStarted(const ActiveBuild & build) = 0; + + virtual void buildFinished(const BuildHandle & handle) = 0; +}; + +struct QueryActiveBuildsStore +{ + inline static std::string operationName = "Querying active builds"; + + virtual ~QueryActiveBuildsStore() = default; + + virtual std::vector queryActiveBuilds() = 0; +}; + +} // namespace nix + +JSON_IMPL(UserInfo) +JSON_IMPL(ActiveBuild) +JSON_IMPL(ActiveBuildInfo) +JSON_IMPL(ActiveBuildInfo::ProcessInfo) diff --git a/src/libstore/include/nix/store/async-path-writer.hh b/src/libstore/include/nix/store/async-path-writer.hh new file mode 100644 index 000000000000..695321ccb1f3 --- /dev/null +++ b/src/libstore/include/nix/store/async-path-writer.hh @@ -0,0 +1,25 @@ +#pragma once + +#include "nix/store/store-api.hh" + +namespace nix { + +struct AsyncPathWriter +{ + virtual ~AsyncPathWriter() = default; + + virtual StorePath addPath( + std::string contents, + std::string name, + StorePathSet references, + RepairFlag repair, + std::shared_ptr provenance = {}) = 0; + + virtual void waitForPath(const StorePath & path) = 0; + + virtual void waitForAllPaths() = 0; + + static ref make(ref store); +}; + +} // namespace nix diff --git a/src/libstore/include/nix/store/binary-cache-store.hh b/src/libstore/include/nix/store/binary-cache-store.hh index 2c3fbdc3a0d1..fbd347e7684f 100644 --- 
a/src/libstore/include/nix/store/binary-cache-store.hh +++ b/src/libstore/include/nix/store/binary-cache-store.hh @@ -107,6 +107,11 @@ protected: public: + bool includeInProvenance() override + { + return true; + } + virtual bool fileExists(const std::string & path) = 0; virtual void upsertFile( @@ -187,7 +192,8 @@ public: ContentAddressMethod hashMethod, HashAlgorithm hashAlgo, const StorePathSet & references, - RepairFlag repair) override; + RepairFlag repair, + std::shared_ptr provenance) override; StorePath addToStore( std::string_view name, diff --git a/src/libstore/include/nix/store/build-result.hh b/src/libstore/include/nix/store/build-result.hh index c664e6e5b6f7..ef2d5ecc817c 100644 --- a/src/libstore/include/nix/store/build-result.hh +++ b/src/libstore/include/nix/store/build-result.hh @@ -13,6 +13,8 @@ namespace nix { +struct Provenance; + /** * Names must be disjoint with `BuildResultFailureStatus`. * @@ -50,6 +52,7 @@ enum struct BuildResultFailureStatus : uint8_t { /// know about this one, so change it back to `OutputRejected` /// before serialization. HashMismatch, + Cancelled, }; /** @@ -73,6 +76,11 @@ struct BuildError : public CloneableError */ bool isNonDeterministic = false; + /** + * The provenance of the derivation, if any. + */ + std::shared_ptr provenance; + public: /** * Variadic constructor for throwing with format strings. @@ -90,6 +98,7 @@ public: Status status; HintFmt msg; bool isNonDeterministic = false; + std::shared_ptr provenance; }; /** @@ -100,7 +109,7 @@ public: : CloneableError(std::move(args.msg)) , status{args.status} , isNonDeterministic{args.isNonDeterministic} - + , provenance{args.provenance} { } @@ -130,6 +139,12 @@ struct BuildResult */ SingleDrvOutputs builtOutputs; + /** + * The provenance of the derivation, if any. Note that this is the provenance of the current build, not + * necessarily of previously existing outputs. 
+ */ + std::shared_ptr provenance; + bool operator==(const BuildResult::Success &) const noexcept; std::strong_ordering operator<=>(const BuildResult::Success &) const noexcept; }; @@ -192,6 +207,13 @@ struct BuildResult bool operator==(const BuildResult &) const noexcept; std::strong_ordering operator<=>(const BuildResult &) const noexcept; + + bool isCancelled() const + { + auto failure = tryGetFailure(); + // FIXME: remove MiscFailure eventually. + return failure && (failure->status == Failure::Cancelled || failure->status == Failure::MiscFailure); + } }; /** diff --git a/src/libstore/include/nix/store/build/build-log.hh b/src/libstore/include/nix/store/build/build-log.hh index cdc9125734d1..4fd71a65f5d2 100644 --- a/src/libstore/include/nix/store/build/build-log.hh +++ b/src/libstore/include/nix/store/build/build-log.hh @@ -3,6 +3,7 @@ #include "nix/util/logging.hh" #include "nix/util/serialise.hh" +#include "nix/util/ref.hh" #include #include @@ -35,9 +36,9 @@ private: public: /** - * The build activity. Owned by BuildLog. + * The build activity. */ - std::unique_ptr act; + ref act; /** * Map for tracking nested activities from JSON messages. @@ -48,7 +49,7 @@ public: * @param maxTailLines Maximum number of tail lines to keep * @param act Activity for this build */ - BuildLog(size_t maxTailLines, std::unique_ptr act); + BuildLog(size_t maxTailLines, ref act); /** * Process output data from child process. diff --git a/src/libstore/include/nix/store/build/derivation-builder.hh b/src/libstore/include/nix/store/build/derivation-builder.hh index f521c0402a2f..3b5b8504966a 100644 --- a/src/libstore/include/nix/store/build/derivation-builder.hh +++ b/src/libstore/include/nix/store/build/derivation-builder.hh @@ -58,6 +58,11 @@ struct DerivationBuilderParams /** The path of the derivation. 
*/ const StorePath & drvPath; + /** + * The provenance of the derivation, if known + */ + const std::shared_ptr drvProvenance; + BuildResult & buildResult; /** @@ -99,6 +104,11 @@ struct DerivationBuilderParams StringSet systemFeatures; DesugaredEnv desugaredEnv; + + /** + * The activity corresponding to the build. + */ + ref act; }; /** diff --git a/src/libstore/include/nix/store/build/derivation-building-goal.hh b/src/libstore/include/nix/store/build/derivation-building-goal.hh index 826cf2c50f07..615b5f04ea38 100644 --- a/src/libstore/include/nix/store/build/derivation-building-goal.hh +++ b/src/libstore/include/nix/store/build/derivation-building-goal.hh @@ -109,7 +109,10 @@ private: */ std::pair checkPathValidity(std::map & initialOutputs); - Done doneSuccess(BuildResult::Success::Status status, SingleDrvOutputs builtOutputs); + Done doneSuccess( + BuildResult::Success::Status status, + SingleDrvOutputs builtOutputs, + std::shared_ptr provenance = nullptr); Done doneFailure(BuildError ex); diff --git a/src/libstore/include/nix/store/build/goal.hh b/src/libstore/include/nix/store/build/goal.hh index 263647fc126a..7706ad20b4d3 100644 --- a/src/libstore/include/nix/store/build/goal.hh +++ b/src/libstore/include/nix/store/build/goal.hh @@ -118,7 +118,7 @@ public: /** * Build result. */ - BuildResult buildResult; + BuildResult buildResult{BuildError(BuildResult::Failure::Cancelled, "")}; /** * Suspend our goal and wait until we get `work`-ed again. 
diff --git a/src/libstore/include/nix/store/build/substitution-goal.hh b/src/libstore/include/nix/store/build/substitution-goal.hh index 7ce28d1deff4..84ddcc8ab19c 100644 --- a/src/libstore/include/nix/store/build/substitution-goal.hh +++ b/src/libstore/include/nix/store/build/substitution-goal.hh @@ -70,6 +70,8 @@ public: { return JobCategory::Substitution; }; + + Done doneFailure(ExitCode result, BuildResult::Failure failure); }; } // namespace nix diff --git a/src/libstore/include/nix/store/builtins.hh b/src/libstore/include/nix/store/builtins.hh index e2caba3f1839..feeb6b49897c 100644 --- a/src/libstore/include/nix/store/builtins.hh +++ b/src/libstore/include/nix/store/builtins.hh @@ -8,8 +8,12 @@ # include "nix/store/aws-creds.hh" #endif +#include + namespace nix { +struct StructuredAttrs; + struct BuiltinBuilderContext { const BasicDerivation & drv; diff --git a/src/libstore/include/nix/store/common-protocol.hh b/src/libstore/include/nix/store/common-protocol.hh index 341d87b41667..877e3754a3fc 100644 --- a/src/libstore/include/nix/store/common-protocol.hh +++ b/src/libstore/include/nix/store/common-protocol.hh @@ -35,6 +35,7 @@ struct CommonProto struct ReadConn { Source & from; + bool shortStorePaths = false; }; /** @@ -44,6 +45,7 @@ struct CommonProto struct WriteConn { Sink & to; + bool shortStorePaths = false; }; template diff --git a/src/libstore/include/nix/store/derivations.hh b/src/libstore/include/nix/store/derivations.hh index 0d137eaf36a2..88bfb6e49be6 100644 --- a/src/libstore/include/nix/store/derivations.hh +++ b/src/libstore/include/nix/store/derivations.hh @@ -17,6 +17,7 @@ namespace nix { struct StoreDirConfig; +struct Provenance; /* Abstract syntax of derivations. 
*/ @@ -614,3 +615,4 @@ constexpr unsigned expectedJsonVersionDerivation = 4; JSON_IMPL_WITH_XP_FEATURES(nix::DerivationOutput) JSON_IMPL_WITH_XP_FEATURES(nix::Derivation) +JSON_IMPL_WITH_XP_FEATURES(nix::BasicDerivation) diff --git a/src/libstore/include/nix/store/export-import.hh b/src/libstore/include/nix/store/export-import.hh index 15092202f1f6..4ea696f992f9 100644 --- a/src/libstore/include/nix/store/export-import.hh +++ b/src/libstore/include/nix/store/export-import.hh @@ -4,16 +4,11 @@ namespace nix { -/** - * Magic header of exportPath() output (obsolete). - */ -const uint32_t exportMagic = 0x4558494e; - /** * Export multiple paths in the format expected by `nix-store * --import`. The paths will be sorted topologically. */ -void exportPaths(Store & store, const StorePathSet & paths, Sink & sink); +void exportPaths(Store & store, const StorePathSet & paths, Sink & sink, unsigned int version); /** * Import a sequence of NAR dumps created by `exportPaths()` into the diff --git a/src/libstore/include/nix/store/filetransfer.hh b/src/libstore/include/nix/store/filetransfer.hh index 688a6afcc6cc..1bf5dd80fbe6 100644 --- a/src/libstore/include/nix/store/filetransfer.hh +++ b/src/libstore/include/nix/store/filetransfer.hh @@ -403,6 +403,8 @@ ref getFileTransfer(); */ ref makeFileTransfer(const FileTransferSettings & settings = fileTransferSettings); +std::shared_ptr resetFileTransfer(); + class FileTransferError final : public CloneableError { public: diff --git a/src/libstore/include/nix/store/gc-store.hh b/src/libstore/include/nix/store/gc-store.hh index de016e241de4..d3d42470e2c9 100644 --- a/src/libstore/include/nix/store/gc-store.hh +++ b/src/libstore/include/nix/store/gc-store.hh @@ -7,9 +7,13 @@ namespace nix { +// FIXME: should turn this into an std::variant to represent the +// several root types. 
+using GcRootInfo = std::string; + typedef boost::unordered_flat_map< StorePath, - boost::unordered_flat_set>, + boost::unordered_flat_set>, std::hash> Roots; @@ -58,6 +62,12 @@ struct GCOptions * Stop after at least `maxFreed` bytes have been freed. */ uint64_t maxFreed{std::numeric_limits::max()}; + + /** + * Whether to hide potentially sensitive information about GC + * roots (such as PIDs). + */ + bool censor = false; }; struct GCResults diff --git a/src/libstore/include/nix/store/globals.hh b/src/libstore/include/nix/store/globals.hh index 3f2f122eb590..38bf5d0a7041 100644 --- a/src/libstore/include/nix/store/globals.hh +++ b/src/libstore/include/nix/store/globals.hh @@ -15,6 +15,16 @@ namespace nix { +template<> +StoreReference BaseSetting::parse(const std::string & str) const; +template<> +std::string BaseSetting::to_string() const; + +template<> +std::set BaseSetting>::parse(const std::string & str) const; +template<> +std::string BaseSetting>::to_string() const; + struct ProfileDirsOptions; struct LogFileSettings : public virtual Config @@ -218,12 +228,8 @@ public: The following system types are widely used, as Nix is actively supported on these platforms: - `x86_64-linux` - - `x86_64-darwin` - - `i686-linux` - `aarch64-linux` - `aarch64-darwin` - - `armv6l-linux` - - `armv7l-linux` In general, you do not have to modify this setting. While you can force Nix to run a Darwin-specific `builder` executable on a Linux machine, the result would obviously be wrong. @@ -423,6 +429,8 @@ public: * Get the options needed for profile directory functions. */ ProfileDirsOptions getProfileDirsOptions() const; + + const ExternalBuilder * findExternalDerivationBuilderIfSupported(const Derivation & drv); }; // FIXME: don't use a global variable. 
@@ -446,6 +454,8 @@ void loadConfFile(AbstractConfig & config); */ extern std::string nixVersion; +extern const std::string determinateNixVersion; + /** * @param loadConfig Whether to load configuration from `nix.conf`, `NIX_CONFIG`, etc. May be disabled for unit tests. * @note When using libexpr, and/or libmain, This is not sufficient. See initNix(). diff --git a/src/libstore/include/nix/store/legacy-ssh-store.hh b/src/libstore/include/nix/store/legacy-ssh-store.hh index acf755d7b9b0..5706aec300f0 100644 --- a/src/libstore/include/nix/store/legacy-ssh-store.hh +++ b/src/libstore/include/nix/store/legacy-ssh-store.hh @@ -73,6 +73,11 @@ struct LegacySSHStore : public virtual Store ref openConnection(); + bool includeInProvenance() override + { + return true; + } + void queryPathInfoUncached( const StorePath & path, Callback> callback) noexcept override; @@ -112,11 +117,12 @@ struct LegacySSHStore : public virtual Store StorePath addToStoreFromDump( Source & dump, std::string_view name, - FileSerialisationMethod dumpMethod = FileSerialisationMethod::NixArchive, - ContentAddressMethod hashMethod = FileIngestionMethod::NixArchive, - HashAlgorithm hashAlgo = HashAlgorithm::SHA256, - const StorePathSet & references = StorePathSet(), - RepairFlag repair = NoRepair) override + FileSerialisationMethod dumpMethod, + ContentAddressMethod hashMethod, + HashAlgorithm hashAlgo, + const StorePathSet & references, + RepairFlag repair, + std::shared_ptr provenance) override { unsupported("addToStore"); } diff --git a/src/libstore/include/nix/store/local-settings.hh b/src/libstore/include/nix/store/local-settings.hh index af7eccdf44fd..f702293260bd 100644 --- a/src/libstore/include/nix/store/local-settings.hh +++ b/src/libstore/include/nix/store/local-settings.hh @@ -37,6 +37,14 @@ struct BaseSetting::trait template<> void BaseSetting::appendOrSet(PathsInChroot newValue, bool append); +template<> +std::map BaseSetting::toJSONObject() const; + +template<> +std::vector 
BaseSetting>::parse(const std::string & str) const; +template<> +std::string BaseSetting>::to_string() const; + struct GCSettings : public virtual Config { Setting reservedSize{ @@ -546,10 +554,24 @@ public: captured by the derivation model itself and are too variable between different versions of the same system to be hard-coded into nix. - The hook is passed the derivation path and, if sandboxes are - enabled, the sandbox directory. It can then modify the sandbox and - send a series of commands to modify various settings to stdout. The - currently recognized commands are: + The hook receives the derivation to be built as JSON in the file + pointed to by the environment variable `NIX_DERIVATION_V4`. See + [@docroot@/protocols/json/derivation/index.md](@docroot@/protocols/json/derivation/index.md) + for the format. For example, to read the `requiredSystemFeatures` + attribute: + + ```sh + jq -r '.env.requiredSystemFeatures' < "$NIX_DERIVATION_V4" + ``` + + > **Deprecated** + > Using the derivation store path passed as `argv[1]` to inspect the + > derivation is deprecated and not recommended. This path may not + > exist when Nix is invoked as a remote builder. + + If sandboxes are enabled, the hook also receives the sandbox + directory as `argv[2]`. It can send a series of commands to modify + various settings to stdout. 
The currently recognized commands are: - `extra-sandbox-paths`\ Pass a list of files and directories to be included in the diff --git a/src/libstore/include/nix/store/local-store.hh b/src/libstore/include/nix/store/local-store.hh index 0b1e13b51292..73229e25b6b4 100644 --- a/src/libstore/include/nix/store/local-store.hh +++ b/src/libstore/include/nix/store/local-store.hh @@ -6,6 +6,7 @@ #include "nix/store/pathlocks.hh" #include "nix/store/store-api.hh" #include "nix/store/indirect-root-store.hh" +#include "nix/store/active-builds.hh" #include "nix/util/sync.hh" #include @@ -171,7 +172,10 @@ public: StoreReference getReference() const override; }; -class LocalStore : public virtual IndirectRootStore, public virtual GcStore +class LocalStore : public virtual IndirectRootStore, + public virtual GcStore, + public virtual TrackActiveBuildsStore, + public virtual QueryActiveBuildsStore { public: @@ -288,7 +292,8 @@ public: ContentAddressMethod hashMethod, HashAlgorithm hashAlgo, const StorePathSet & references, - RepairFlag repair) override; + RepairFlag repair, + std::shared_ptr provenance) override; void addTempRoot(const StorePath & path) override; @@ -507,6 +512,24 @@ private: friend struct DerivationGoal; /* Only used for createTempDirInStore. 
*/ friend class DerivationBuilderImpl; + +private: + + std::filesystem::path activeBuildsDir; + + struct ActiveBuildFile + { + AutoCloseFD fd; + AutoDelete del; + }; + + Sync> activeBuilds; + + std::vector queryActiveBuilds() override; + + BuildHandle buildStarted(const ActiveBuild & build) override; + + void buildFinished(const BuildHandle & handle) override; }; } // namespace nix diff --git a/src/libstore/include/nix/store/meson.build b/src/libstore/include/nix/store/meson.build index 93be5921abc0..9900e64c67a0 100644 --- a/src/libstore/include/nix/store/meson.build +++ b/src/libstore/include/nix/store/meson.build @@ -10,6 +10,8 @@ config_pub_h = configure_file( ) headers = [ config_pub_h ] + files( + 'active-builds.hh', + 'async-path-writer.hh', 'aws-creds.hh', 'binary-cache-store.hh', 'build-result.hh', @@ -71,6 +73,7 @@ headers = [ config_pub_h ] + files( 'pathlocks.hh', 'posix-fs-canonicalise.hh', 'profiles.hh', + 'provenance.hh', 'realisation.hh', 'references.hh', 'remote-fs-accessor.hh', diff --git a/src/libstore/include/nix/store/path-info.hh b/src/libstore/include/nix/store/path-info.hh index 44840b0e5e45..98f9ebe7aeda 100644 --- a/src/libstore/include/nix/store/path-info.hh +++ b/src/libstore/include/nix/store/path-info.hh @@ -13,6 +13,7 @@ namespace nix { class Store; struct StoreDirConfig; +struct Provenance; /** * JSON format version for path info output. @@ -123,6 +124,12 @@ struct UnkeyedValidPathInfo */ std::optional ca; + /** + * The provenance of this store path, i.e. a link back to the Nix + * expression used to create it. 
+ */ + std::shared_ptr provenance; + UnkeyedValidPathInfo(const UnkeyedValidPathInfo & other) = default; UnkeyedValidPathInfo(const StoreDirConfig & store, Hash narHash); diff --git a/src/libstore/include/nix/store/provenance.hh b/src/libstore/include/nix/store/provenance.hh new file mode 100644 index 000000000000..5f9773e724ea --- /dev/null +++ b/src/libstore/include/nix/store/provenance.hh @@ -0,0 +1,77 @@ +#pragma once + +#include "nix/util/provenance.hh" +#include "nix/util/types.hh" +#include "nix/store/path.hh" +#include "nix/store/outputs-spec.hh" + +namespace nix { + +struct BuildProvenance : Provenance +{ + /** + * The derivation that built this path. + */ + StorePath drvPath; + + /** + * The output of the derivation that corresponds to this path. + */ + OutputName output; + + /** + * The hostname of the machine on which the derivation was built, if known. + */ + std::optional buildHost; + + /** + * User-defined tags from the build host. + */ + StringMap tags; + + /** + * The system type of the derivation. + */ + std::string system; + + /** + * The provenance of the derivation, if known. + */ + std::shared_ptr next; + + // FIXME: do we need anything extra for CA derivations? + + BuildProvenance( + const StorePath & drvPath, + const OutputName & output, + std::optional buildHost, + StringMap tags, + std::string system, + std::shared_ptr next); + + nlohmann::json to_json() const override; +}; + +struct CopiedProvenance : Provenance +{ + /** + * Store URL (typically a binary cache) from which this store + * path was copied. + */ + std::string from; + + /** + * Provenance of the store path in the upstream store, if any. 
+ */ + std::shared_ptr next; + + CopiedProvenance(std::string_view from, std::shared_ptr next) + : from(from) + , next(std::move(next)) + { + } + + nlohmann::json to_json() const override; +}; + +} // namespace nix diff --git a/src/libstore/include/nix/store/remote-store.hh b/src/libstore/include/nix/store/remote-store.hh index 0df2b5bab669..289d694aedd3 100644 --- a/src/libstore/include/nix/store/remote-store.hh +++ b/src/libstore/include/nix/store/remote-store.hh @@ -10,6 +10,7 @@ #include "nix/util/file-descriptor.hh" #include "nix/store/gc-store.hh" #include "nix/store/log-store.hh" +#include "nix/store/active-builds.hh" namespace nix { @@ -26,7 +27,7 @@ struct RemoteStoreConfig : virtual StoreConfig using StoreConfig::StoreConfig; Setting maxConnections{ - this, 1, "max-connections", "Maximum number of concurrent connections to the Nix daemon."}; + this, 64, "max-connections", "Maximum number of concurrent connections to the Nix daemon."}; Setting maxConnectionAge{ this, @@ -39,7 +40,10 @@ struct RemoteStoreConfig : virtual StoreConfig * \todo RemoteStore is a misnomer - should be something like * DaemonStore. */ -struct RemoteStore : public virtual Store, public virtual GcStore, public virtual LogStore +struct RemoteStore : public virtual Store, + public virtual GcStore, + public virtual LogStore, + public virtual QueryActiveBuildsStore { using Config = RemoteStoreConfig; @@ -81,7 +85,8 @@ struct RemoteStore : public virtual Store, public virtual GcStore, public virtua ContentAddressMethod caMethod, HashAlgorithm hashAlgo, const StorePathSet & references, - RepairFlag repair); + RepairFlag repair, + std::shared_ptr provenance); /** * Add a content-addressable store path. `dump` will be drained. 
@@ -89,16 +94,15 @@ struct RemoteStore : public virtual Store, public virtual GcStore, public virtua StorePath addToStoreFromDump( Source & dump, std::string_view name, - FileSerialisationMethod dumpMethod = FileSerialisationMethod::NixArchive, - ContentAddressMethod hashMethod = FileIngestionMethod::NixArchive, - HashAlgorithm hashAlgo = HashAlgorithm::SHA256, - const StorePathSet & references = StorePathSet(), - RepairFlag repair = NoRepair) override; + FileSerialisationMethod dumpMethod, + ContentAddressMethod hashMethod, + HashAlgorithm hashAlgo, + const StorePathSet & references, + RepairFlag repair, + std::shared_ptr provenance) override; void addToStore(const ValidPathInfo & info, Source & nar, RepairFlag repair, CheckSigsFlag checkSigs) override; - void addMultipleToStore(Source & source, RepairFlag repair, CheckSigsFlag checkSigs) override; - void addMultipleToStore(PathsSource && pathsToCopy, Activity & act, RepairFlag repair, CheckSigsFlag checkSigs) override; @@ -146,6 +150,8 @@ struct RemoteStore : public virtual Store, public virtual GcStore, public virtua void addBuildLog(const StorePath & drvPath, std::string_view log) override; + std::vector queryActiveBuilds() override; + std::optional getVersion() override; void connect() override; diff --git a/src/libstore/include/nix/store/store-api.hh b/src/libstore/include/nix/store/store-api.hh index b5a6f6c497e2..89dd231a0cf4 100644 --- a/src/libstore/include/nix/store/store-api.hh +++ b/src/libstore/include/nix/store/store-api.hh @@ -43,6 +43,8 @@ struct SourceAccessor; struct NarInfoDiskCache; struct NarInfoDiskCacheSettings; class Store; +struct AsyncPathWriter; +struct Provenance; typedef std::map OutputPathMap; @@ -370,7 +372,9 @@ public: StorePath followLinksToStorePath(std::string_view path) const; /** - * Check whether a path is valid. + * Check whether a path is valid. NOTE: this function does not + * generally cache whether a path is valid. 
You may want to use + * `maybeQueryPathInfo()`, which does cache. */ bool isValidPath(const StorePath & path); @@ -410,10 +414,17 @@ public: /** * Query information about a valid path. It is permitted to omit - * the name part of the store path. + * the name part of the store path. Throws an exception if the + * path is not valid. */ ref queryPathInfo(const StorePath & path); + /** + * Like `queryPathInfo()`, but returns `nullptr` if the path is + * not valid. + */ + std::shared_ptr maybeQueryPathInfo(const StorePath & path); + /** * Asynchronous version of queryPathInfo(). */ @@ -542,7 +553,8 @@ public: virtual void querySubstitutablePathInfos(const StorePathCAMap & paths, SubstitutablePathInfos & infos); /** - * Import a path into the store. + * Import a path into the store. Note that the entire NAR may not be read from `narSource`, e.g. if the path is + * already valid. */ virtual void addToStore( const ValidPathInfo & info, @@ -559,8 +571,6 @@ public: /** * Import multiple paths into the store. */ - virtual void addMultipleToStore(Source & source, RepairFlag repair = NoRepair, CheckSigsFlag checkSigs = CheckSigs); - virtual void addMultipleToStore( PathsSource && pathsToCopy, Activity & act, RepairFlag repair = NoRepair, CheckSigsFlag checkSigs = CheckSigs); @@ -618,7 +628,8 @@ public: ContentAddressMethod hashMethod = ContentAddressMethod::Raw::NixArchive, HashAlgorithm hashAlgo = HashAlgorithm::SHA256, const StorePathSet & references = StorePathSet(), - RepairFlag repair = NoRepair) = 0; + RepairFlag repair = NoRepair, + std::shared_ptr provenance = nullptr) = 0; /** * Add a mapping indicating that `deriver!outputName` maps to the output path @@ -811,7 +822,17 @@ public: /** * Write a derivation to the Nix store, and return its path. 
*/ - virtual StorePath writeDerivation(const Derivation & drv, RepairFlag repair = NoRepair); + virtual StorePath writeDerivation( + const Derivation & drv, RepairFlag repair = NoRepair, std::shared_ptr provenance = nullptr); + + /** + * Asynchronously write a derivation to the Nix store, and return its path. + */ + StorePath writeDerivation( + AsyncPathWriter & asyncPathWriter, + const Derivation & drv, + RepairFlag repair = NoRepair, + std::shared_ptr provenance = nullptr); /** * Read a derivation (which must already be valid). @@ -936,6 +957,15 @@ public: return {}; } + /** + * Whether, when copying *from* this store, a "copied" provenance + * record should be added. + */ + virtual bool includeInProvenance() + { + return false; + } + protected: Stats stats; @@ -954,9 +984,10 @@ protected: }; /** - * Copy a path from one store to another. + * Copy a path from one store to another. Return the path info of the newly added store path, or nullptr if the path was + * already valid. */ -void copyStorePath( +std::shared_ptr copyStorePath( Store & srcStore, Store & dstStore, const StorePath & storePath, diff --git a/src/libstore/include/nix/store/worker-protocol-impl.hh b/src/libstore/include/nix/store/worker-protocol-impl.hh index 26f6b9d44e46..c36145d620d0 100644 --- a/src/libstore/include/nix/store/worker-protocol-impl.hh +++ b/src/libstore/include/nix/store/worker-protocol-impl.hh @@ -45,12 +45,14 @@ struct WorkerProto::Serialise { static T read(const StoreDirConfig & store, WorkerProto::ReadConn conn) { - return CommonProto::Serialise::read(store, CommonProto::ReadConn{.from = conn.from}); + return CommonProto::Serialise::read( + store, CommonProto::ReadConn{.from = conn.from, .shortStorePaths = conn.shortStorePaths}); } static void write(const StoreDirConfig & store, WorkerProto::WriteConn conn, const T & t) { - CommonProto::Serialise::write(store, CommonProto::WriteConn{.to = conn.to}, t); + CommonProto::Serialise::write( + store, CommonProto::WriteConn{.to = 
conn.to, .shortStorePaths = conn.shortStorePaths}, t); } }; diff --git a/src/libstore/include/nix/store/worker-protocol.hh b/src/libstore/include/nix/store/worker-protocol.hh index 4098e8fd9123..7c205016b7e5 100644 --- a/src/libstore/include/nix/store/worker-protocol.hh +++ b/src/libstore/include/nix/store/worker-protocol.hh @@ -109,6 +109,10 @@ struct WorkerProto static const Version minimum; + static constexpr std::string_view featureQueryActiveBuilds = "queryActiveBuilds"; + static constexpr std::string_view featureProvenance = "provenance"; + static constexpr std::string_view featureVersionedAddToStoreMultiple = "versionedAddToStoreMultiple"; + /** * A unidirectional read connection, to be used by the read half of the * canonical serializers below. @@ -117,6 +121,7 @@ struct WorkerProto { Source & from; const Version & version; + bool shortStorePaths = false; }; /** @@ -127,6 +132,7 @@ struct WorkerProto { Sink & to; const Version & version; + bool shortStorePaths = false; }; /** @@ -230,6 +236,7 @@ enum struct WorkerProto::Op : uint64_t { AddBuildLog = 45, BuildPathsWithResults = 46, AddPermRoot = 47, + QueryActiveBuilds = 48, }; struct WorkerProto::ClientHandshakeInfo diff --git a/src/libstore/include/nix/store/worker-settings.hh b/src/libstore/include/nix/store/worker-settings.hh index 13fa0261d8d0..6125dfd7cc26 100644 --- a/src/libstore/include/nix/store/worker-settings.hh +++ b/src/libstore/include/nix/store/worker-settings.hh @@ -7,6 +7,11 @@ namespace nix { +template<> +std::vector BaseSetting>::parse(const std::string & str) const; +template<> +std::string BaseSetting>::to_string() const; + struct MaxBuildJobsSetting : public BaseSetting { MaxBuildJobsSetting( @@ -361,6 +366,25 @@ public: /nix/store/scz72lskj03ihkcn42ias5mlp4i4gr1k-bash-4.4-p23-man /nix/store/a724znygmd1cac856j3gfsyvih3lw07j-bash-4.4-p23`. )"}; + + Setting hostName{ + this, + "", + "host-name", + R"( + The name of this host for recording build provenance. 
If unset, the Unix host name is used. + )"}; + + std::optional getHostName(); + + JSONSetting buildProvenanceTags{ + this, + {}, + "build-provenance-tags", + R"( + Arbitrary name/value pairs that are recorded in the build provenance of store paths built by this machine. + This can be used to tag builds with metadata such as the CI job URL, build cluster name, etc. + )"}; }; } // namespace nix diff --git a/src/libstore/local-fs-store.cc b/src/libstore/local-fs-store.cc index a213a3e34d91..d3563329b0e3 100644 --- a/src/libstore/local-fs-store.cc +++ b/src/libstore/local-fs-store.cc @@ -54,7 +54,7 @@ struct LocalStoreAccessor : PosixSourceAccessor void requireStoreObject(const CanonPath & path) { auto [storePath, rest] = store->toStorePath(store->storeDir + path.abs()); - if (requireValidPath && !store->isValidPath(storePath)) + if (requireValidPath && !store->maybeQueryPathInfo(storePath)) throw InvalidPath("path '%1%' is not a valid store path", store->printStorePath(storePath)); } diff --git a/src/libstore/local-store-active-builds.cc b/src/libstore/local-store-active-builds.cc new file mode 100644 index 000000000000..25c6ece58970 --- /dev/null +++ b/src/libstore/local-store-active-builds.cc @@ -0,0 +1,282 @@ +#include "nix/store/local-store.hh" +#include "nix/util/json-utils.hh" +#ifdef __linux__ +# include "nix/util/cgroup.hh" +# include +# include +# include +#endif + +#ifdef __APPLE__ +# include +# include +# include +#endif + +#include +#include + +namespace nix { + +#ifdef __linux__ +static ActiveBuildInfo::ProcessInfo getProcessInfo(pid_t pid) +{ + ActiveBuildInfo::ProcessInfo info; + info.pid = pid; + info.argv = + tokenizeString>(readFile(fmt("/proc/%d/cmdline", pid)), std::string("\000", 1)); + + auto statPath = fmt("/proc/%d/stat", pid); + + AutoCloseFD statFd = open(statPath.c_str(), O_RDONLY | O_CLOEXEC); + if (!statFd) + throw SysError("opening '%s'", statPath); + + // Get the UID from the ownership of the stat file. 
+ struct stat st; + if (fstat(statFd.get(), &st) == -1) + throw SysError("getting ownership of '%s'", statPath); + info.user = UserInfo::fromUid(st.st_uid); + + // Read /proc/[pid]/stat for parent PID and CPU times. + // Format: pid (comm) state ppid ... + // Note that the comm field can contain spaces, so use a regex to parse it. + auto statContent = trim(readFile(statFd.get())); + static std::regex statRegex(R"((\d+) \(([^)]*)\) (.*))"); + std::smatch match; + if (!std::regex_match(statContent, match, statRegex)) + throw Error("failed to parse /proc/%d/stat", pid); + + // Parse the remaining fields after (comm). + auto remainingFields = tokenizeString>(match[3].str()); + + if (remainingFields.size() > 1) + info.parentPid = string2Int(remainingFields[1]).value_or(0); + + static long clkTck = sysconf(_SC_CLK_TCK); + if (remainingFields.size() > 14 && clkTck > 0) { + if (auto utime = string2Int(remainingFields[11])) + info.utime = std::chrono::microseconds((*utime * 1'000'000) / clkTck); + if (auto stime = string2Int(remainingFields[12])) + info.stime = std::chrono::microseconds((*stime * 1'000'000) / clkTck); + if (auto cutime = string2Int(remainingFields[13])) + info.cutime = std::chrono::microseconds((*cutime * 1'000'000) / clkTck); + if (auto cstime = string2Int(remainingFields[14])) + info.cstime = std::chrono::microseconds((*cstime * 1'000'000) / clkTck); + } + + return info; +} + +/** + * Recursively get all descendant PIDs of a given PID using /proc/[pid]/task/[pid]/children. + */ +static std::set getDescendantPids(pid_t pid) +{ + std::set descendants; + + [&](this auto self, pid_t pid) -> void { + try { + descendants.insert(pid); + for (const auto & childPidStr : + tokenizeString>(readFile(fmt("/proc/%d/task/%d/children", pid, pid)))) + if (auto childPid = string2Int(childPidStr)) + self(*childPid); + } catch (...) { + // Process may have exited. 
+ ignoreExceptionExceptInterrupt(); + } + }(pid); + + return descendants; +} +#endif + +#ifdef __APPLE__ +static ActiveBuildInfo::ProcessInfo getProcessInfo(pid_t pid) +{ + ActiveBuildInfo::ProcessInfo info; + info.pid = pid; + + // Get basic process info including ppid and uid. + struct proc_bsdinfo procInfo; + if (proc_pidinfo(pid, PROC_PIDTBSDINFO, 0, &procInfo, sizeof(procInfo)) != sizeof(procInfo)) + throw SysError("getting process info for pid %d", pid); + + info.parentPid = procInfo.pbi_ppid; + info.user = UserInfo::fromUid(procInfo.pbi_uid); + + // Get CPU times. + struct proc_taskinfo taskInfo; + if (proc_pidinfo(pid, PROC_PIDTASKINFO, 0, &taskInfo, sizeof(taskInfo)) == sizeof(taskInfo)) { + + mach_timebase_info_data_t timebase; + mach_timebase_info(&timebase); + auto nanosecondsPerTick = (double) timebase.numer / (double) timebase.denom; + + // Convert nanoseconds to microseconds. + info.utime = + std::chrono::microseconds((uint64_t) ((double) taskInfo.pti_total_user * nanosecondsPerTick / 1000)); + info.stime = + std::chrono::microseconds((uint64_t) ((double) taskInfo.pti_total_system * nanosecondsPerTick / 1000)); + } + + // Get argv using sysctl. + int mib[3] = {CTL_KERN, KERN_PROCARGS2, pid}; + size_t size = 0; + + // First call to get size. + if (sysctl(mib, 3, nullptr, &size, nullptr, 0) == 0 && size > 0) { + std::vector buffer(size); + if (sysctl(mib, 3, buffer.data(), &size, nullptr, 0) == 0) { + // Format: argc (int), followed by executable path, followed by null-terminated args + if (size >= sizeof(int)) { + int argc; + memcpy(&argc, buffer.data(), sizeof(argc)); + + // Skip past argc and executable path (null-terminated). + size_t pos = sizeof(int); + while (pos < size && buffer[pos] != '\0') + pos++; + pos++; // Skip the null terminator + + // Parse the arguments. 
+ while (pos < size && info.argv.size() < (size_t) argc) { + size_t argStart = pos; + while (pos < size && buffer[pos] != '\0') + pos++; + + if (pos > argStart) + info.argv.emplace_back(buffer.data() + argStart, pos - argStart); + + pos++; // Skip the null terminator + } + } + } + } + + return info; +} + +/** + * Recursively get all descendant PIDs using sysctl with KERN_PROC. + */ +static std::set getDescendantPids(pid_t startPid) +{ + // Get all processes. + int mib[4] = {CTL_KERN, KERN_PROC, KERN_PROC_ALL, 0}; + size_t size = 0; + + if (sysctl(mib, 4, nullptr, &size, nullptr, 0) == -1) + return {startPid}; + + std::vector procs(size / sizeof(struct kinfo_proc)); + if (sysctl(mib, 4, procs.data(), &size, nullptr, 0) == -1) + return {startPid}; + + // Get the children of all processes. + std::map> children; + size_t count = size / sizeof(struct kinfo_proc); + for (size_t i = 0; i < count; i++) { + pid_t childPid = procs[i].kp_proc.p_pid; + pid_t parentPid = procs[i].kp_eproc.e_ppid; + children[parentPid].insert(childPid); + } + + // Get all children of `pid`. + std::set descendants; + std::queue todo; + todo.push(startPid); + while (auto pid = pop(todo)) { + if (!descendants.insert(*pid).second) + continue; + for (auto & child : children[*pid]) + todo.push(child); + } + + return descendants; +} +#endif + +std::vector LocalStore::queryActiveBuilds() +{ + std::vector result; + + for (auto & entry : DirectoryIterator{activeBuildsDir}) { + auto path = entry.path(); + + try { + // Open the file. If we can lock it, the build is not active. + auto fd = openLockFile(path, false); + if (!fd || lockFile(fd.get(), ltRead, false)) { + AutoDelete(path, false); + continue; + } + + ActiveBuildInfo info(nlohmann::json::parse(readFile(fd.get())).get()); + +#if defined(__linux__) || defined(__APPLE__) + /* Read process information. 
*/ + try { +# ifdef __linux__ + if (info.cgroup) { + for (auto pid : getPidsInCgroup(*info.cgroup)) + info.processes.push_back(getProcessInfo(pid)); + + /* Read CPU statistics from the cgroup. */ + auto stats = getCgroupStats(*info.cgroup); + info.utime = stats.cpuUser; + info.stime = stats.cpuSystem; + } else +# endif + { + for (auto pid : getDescendantPids(info.mainPid)) + info.processes.push_back(getProcessInfo(pid)); + } + } catch (...) { + ignoreExceptionExceptInterrupt(); + } +#endif + + result.push_back(std::move(info)); + } catch (...) { + ignoreExceptionExceptInterrupt(); + } + } + + return result; +} + +LocalStore::BuildHandle LocalStore::buildStarted(const ActiveBuild & build) +{ + // Write info about the active build to the active-builds directory where it can be read by `queryBuilds()`. + static std::atomic nextId{1}; + + auto id = nextId++; + + auto infoFileName = fmt("%d-%d", getpid(), id); + auto infoFilePath = activeBuildsDir / infoFileName; + + auto infoFd = openLockFile(infoFilePath, true); + + // Lock the file to denote that the build is active. 
+ lockFile(infoFd.get(), ltWrite, true); + + writeFile(infoFilePath, nlohmann::json(build).dump(), 0600, FsSync::Yes); + + activeBuilds.lock()->emplace( + id, + ActiveBuildFile{ + .fd = std::move(infoFd), + .del = AutoDelete(infoFilePath, false), + }); + + return BuildHandle(*this, id); +} + +void LocalStore::buildFinished(const BuildHandle & handle) +{ + activeBuilds.lock()->erase(handle.id); +} + +} // namespace nix diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc index 686e9988e01f..773ee203c504 100644 --- a/src/libstore/local-store.cc +++ b/src/libstore/local-store.cc @@ -17,6 +17,7 @@ #include "nix/store/keys.hh" #include "nix/util/users.hh" #include "nix/store/store-registration.hh" +#include "nix/util/provenance.hh" #include #include @@ -125,6 +126,7 @@ LocalStore::LocalStore(ref config) , schemaPath(dbDir / "schema") , tempRootsDir(config->stateDir.get() / "temproots") , fnTempRoots(tempRootsDir / std::to_string(getpid())) + , activeBuildsDir(config->stateDir.get() / "active-builds") { auto state(_state->lock()); state->stmts = std::make_unique(); @@ -145,6 +147,7 @@ LocalStore::LocalStore(ref config) const auto & localSettings = config->getLocalSettings(); const auto & gcSettings = localSettings.getGCSettings(); createDirs(gcRootsDir); + createDirs(activeBuildsDir); for (auto & perUserDir : {profilesDir / "per-user", gcRootsDir / "per-user"}) { createDirs(perUserDir); @@ -331,13 +334,16 @@ LocalStore::LocalStore(ref config) /* Prepare SQL statements. */ state->stmts->RegisterValidPath.create( state->db, - "insert into ValidPaths (path, hash, registrationTime, deriver, narSize, ultimate, sigs, ca) values (?, ?, ?, ?, ?, ?, ?, ?);"); + fmt("insert into ValidPaths (path, hash, registrationTime, deriver, narSize, ultimate, sigs, ca%s) values (?, ?, ?, ?, ?, ?, ?, ?%s);", + experimentalFeatureSettings.isEnabled(Xp::Provenance) ? ", provenance" : "", + experimentalFeatureSettings.isEnabled(Xp::Provenance) ? ", ?" 
: "")); state->stmts->UpdatePathInfo.create( state->db, "update ValidPaths set narSize = ?, hash = ?, ultimate = ?, sigs = ?, ca = ? where path = ?;"); state->stmts->AddReference.create(state->db, "insert or replace into Refs (referrer, reference) values (?, ?);"); state->stmts->QueryPathInfo.create( state->db, - "select id, hash, registrationTime, deriver, narSize, ultimate, sigs, ca from ValidPaths where path = ?;"); + fmt("select id, hash, registrationTime, deriver, narSize, ultimate, sigs, ca%s from ValidPaths where path = ?;", + experimentalFeatureSettings.isEnabled(Xp::Provenance) ? ", provenance" : "")); state->stmts->QueryReferences.create( state->db, "select path from Refs join ValidPaths on reference = id where referrer = ?;"); state->stmts->QueryReferrers.create( @@ -606,6 +612,9 @@ void LocalStore::upgradeDBSchema(State & state) "20220326-ca-derivations", #include "ca-specific-schema.sql.gen.hh" ); + + if (experimentalFeatureSettings.isEnabled(Xp::Provenance)) + doUpgrade("20241024-provenance", "alter table ValidPaths add column provenance text"); } /* To improve purity, users may want to make the Nix store a read-only @@ -683,14 +692,15 @@ uint64_t LocalStore::addValidPath(State & state, const ValidPathInfo & info) "cannot add path '%s' to the Nix store because it claims to be content-addressed but isn't", printStorePath(info.path)); - state.stmts->RegisterValidPath - .use()(printStorePath(info.path))(info.narHash.to_string(HashFormat::Base16, true))( - info.registrationTime == 0 ? time(nullptr) : info.registrationTime)( - info.deriver ? printStorePath(*info.deriver) : "", - (bool) info.deriver)(info.narSize, info.narSize != 0)(info.ultimate ? 
1 : 0, info.ultimate)( - concatStringsSep(" ", Signature::toStrings(info.sigs)), - !info.sigs.empty())(renderContentAddress(info.ca), (bool) info.ca) - .exec(); + auto query = state.stmts->RegisterValidPath.use()(printStorePath(info.path))(info.narHash.to_string( + HashFormat::Base16, true))(info.registrationTime == 0 ? time(nullptr) : info.registrationTime)( + info.deriver ? printStorePath(*info.deriver) : "", + (bool) info.deriver)(info.narSize, info.narSize != 0)(info.ultimate ? 1 : 0, info.ultimate)( + concatStringsSep(" ", Signature::toStrings(info.sigs)), + !info.sigs.empty())(renderContentAddress(info.ca), (bool) info.ca); + if (experimentalFeatureSettings.isEnabled(Xp::Provenance)) + query(info.provenance ? info.provenance->to_json_str() : "", (bool) info.provenance); + query.exec(); uint64_t id = state.db.getLastInsertedRowId(); /* If this is a derivation, then store the derivation outputs in @@ -779,6 +789,12 @@ std::shared_ptr LocalStore::queryPathInfoInternal(State & s while (useQueryReferences.next()) info->references.insert(parseStorePath(useQueryReferences.getStr(0))); + if (experimentalFeatureSettings.isEnabled(Xp::Provenance)) { + auto prov = (const char *) sqlite3_column_text(state.stmts->QueryPathInfo, 8); + if (prov) + info->provenance = Provenance::from_json_str(prov); + } + return info; } @@ -998,18 +1014,6 @@ void LocalStore::addToStore(const ValidPathInfo & info, Source & source, RepairF throw Error("cannot add path '%s' because it lacks a signature by a trusted key", printStorePath(info.path)); { - /* In case we are not interested in reading the NAR: discard it. */ - bool narRead = false; - Finally cleanup = [&]() { - if (!narRead) - try { - source.skip(info.narSize); - } catch (...) { - // TODO: should Interrupted be handled here? 
- ignoreExceptionInDestructor(); - } - }; - addTempRoot(info.path); if (repair || !isValidPath(info.path)) { @@ -1034,7 +1038,6 @@ void LocalStore::addToStore(const ValidPathInfo & info, Source & source, RepairF TeeSource wrapperSource{source, hashSink}; - narRead = true; restorePath(realPath, wrapperSource, config->getLocalSettings().fsyncStorePaths); auto hashResult = hashSink.finish(); @@ -1043,8 +1046,8 @@ void LocalStore::addToStore(const ValidPathInfo & info, Source & source, RepairF throw Error( "hash mismatch importing path '%s';\n specified: %s\n got: %s", printStorePath(info.path), - info.narHash.to_string(HashFormat::Nix32, true), - hashResult.hash.to_string(HashFormat::Nix32, true)); + info.narHash.to_string(HashFormat::SRI, true), + hashResult.hash.to_string(HashFormat::SRI, true)); if (hashResult.numBytesDigested != info.narSize) throw Error( @@ -1118,7 +1121,8 @@ StorePath LocalStore::addToStoreFromDump( ContentAddressMethod hashMethod, HashAlgorithm hashAlgo, const StorePathSet & references, - RepairFlag repair) + RepairFlag repair, + std::shared_ptr provenance) { /* For computing the store path. 
*/ auto hashSink = std::make_unique(hashAlgo); @@ -1264,6 +1268,7 @@ StorePath LocalStore::addToStoreFromDump( auto info = ValidPathInfo::makeFromCA(*this, name, std::move(desc), narHash.hash); info.narSize = narHash.numBytesDigested; + info.provenance = provenance; registerValidPath(info); } diff --git a/src/libstore/meson.build b/src/libstore/meson.build index edc476ae7344..47c7d20d59c4 100644 --- a/src/libstore/meson.build +++ b/src/libstore/meson.build @@ -13,6 +13,8 @@ project( license : 'LGPL-2.1-or-later', ) +fs = import('fs') + cxx = meson.get_compiler('cpp') subdir('nix-meson-build-support/deps-lists') @@ -22,6 +24,12 @@ configdata_priv = configuration_data() # TODO rename, because it will conflict with downstream projects configdata_priv.set_quoted('PACKAGE_VERSION', meson.project_version()) +configdata_priv.set('IS_STATIC', get_option('default_library') == 'static') + +configdata_priv.set_quoted( + 'DETERMINATE_NIX_VERSION', + fs.read('../../.version-determinate').strip(), +) subdir('nix-meson-build-support/default-system-cpu') @@ -203,8 +211,6 @@ if get_option('embedded-sandbox-shell') generated_headers += embedded_sandbox_shell_gen endif -fs = import('fs') - prefix = get_option('prefix') # For each of these paths, assume that it is relative to the prefix unless # it is already an absolute path (which is the default for store-dir, localstatedir, and log-dir). 
@@ -265,6 +271,19 @@ configdata_priv.set_quoted( : 'lsof', ) +link_args = [] + +wasmtime_required = get_option('wasm').disable_if( + get_option('default_library') == 'static', + error_message : 'Building with wasmtime and static linking is not supported', +) + +if wasmtime_required.enabled() + link_args += '-lwasmtime' +endif + +configdata_priv.set('NIX_USE_WASMTIME', wasmtime_required.enabled().to_int()) + config_priv_h = configure_file( configuration : configdata_priv, output : 'store-config-private.hh', @@ -273,6 +292,8 @@ config_priv_h = configure_file( subdir('nix-meson-build-support/common') sources = files( + 'active-builds.cc', + 'async-path-writer.cc', 'binary-cache-store.cc', 'build-result.cc', 'build/build-log.cc', @@ -313,6 +334,7 @@ sources = files( 'local-fs-store.cc', 'local-gc.cc', 'local-overlay-store.cc', + 'local-store-active-builds.cc', 'local-store.cc', 'log-store.cc', 'machines.cc', @@ -331,6 +353,7 @@ sources = files( 'pathlocks.cc', 'posix-fs-canonicalise.cc', 'profiles.cc', + 'provenance.cc', 'realisation.cc', 'references.cc', 'remote-fs-accessor.cc', @@ -380,8 +403,8 @@ this_library = library( soversion : nix_soversion, dependencies : deps_public + deps_private + deps_other, include_directories : include_dirs, - link_args : linker_export_flags, - prelink : true, # For C++ static initializers + link_args : linker_export_flags + link_args, + prelink : prelink, # For C++ static initializers install : true, cpp_pch : do_pch ? 
[ 'pch/precompiled-headers.hh' ] : [], ) diff --git a/src/libstore/meson.options b/src/libstore/meson.options index c822133df46e..6bae2ab11f17 100644 --- a/src/libstore/meson.options +++ b/src/libstore/meson.options @@ -39,3 +39,9 @@ option( type : 'feature', description : 'build support for AWS authentication with S3', ) + +option( + 'wasm', + type : 'feature', + description : 'enable wasmtime integration into the Nix derivation builder', +) diff --git a/src/libstore/nar-info-disk-cache.cc b/src/libstore/nar-info-disk-cache.cc index 3f330753d949..2ab411d80814 100644 --- a/src/libstore/nar-info-disk-cache.cc +++ b/src/libstore/nar-info-disk-cache.cc @@ -3,6 +3,7 @@ #include "nix/util/sync.hh" #include "nix/store/sqlite.hh" #include "nix/store/globals.hh" +#include "nix/store/provenance.hh" #include #include @@ -36,6 +37,7 @@ create table if not exists NARs ( deriver text, sigs text, ca text, + provenance text, timestamp integer not null, present integer not null, primary key (cache, hashPart), @@ -84,7 +86,7 @@ struct NarInfoDiskCacheImpl : NarInfoDiskCache NarInfoDiskCacheImpl( const Settings & settings, SQLiteSettings sqliteSettings, - std::filesystem::path dbPath = getCacheDir() / "binary-cache-v7.sqlite") + std::filesystem::path dbPath = getCacheDir() / "binary-cache-detsys-v1.sqlite") : NarInfoDiskCache{settings} { auto state(_state.lock()); @@ -108,14 +110,14 @@ struct NarInfoDiskCacheImpl : NarInfoDiskCache state->insertNAR.create( state->db, "insert or replace into NARs(cache, hashPart, namePart, url, compression, fileHash, fileSize, narHash, " - "narSize, refs, deriver, sigs, ca, timestamp, present) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, 1)"); + "narSize, refs, deriver, sigs, ca, provenance, timestamp, present) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, 1)"); state->insertMissingNAR.create( state->db, "insert or replace into NARs(cache, hashPart, timestamp, present) values (?, ?, ?, 0)"); state->queryNAR.create( state->db, - "select 
present, namePart, url, compression, fileHash, fileSize, narHash, narSize, refs, deriver, sigs, ca from NARs where cache = ? and hashPart = ? and ((present = 0 and timestamp > ?) or (present = 1 and timestamp > ?))"); + "select present, namePart, url, compression, fileHash, fileSize, narHash, narSize, refs, deriver, sigs, ca, provenance from NARs where cache = ? and hashPart = ? and ((present = 0 and timestamp > ?) or (present = 1 and timestamp > ?))"); state->insertRealisation.create( state->db, @@ -282,6 +284,8 @@ struct NarInfoDiskCacheImpl : NarInfoDiskCache for (auto & sig : tokenizeString(queryNAR.getStr(10), " ")) narInfo->sigs.insert(Signature::parse(sig)); narInfo->ca = ContentAddress::parseOpt(queryNAR.getStr(11)); + if (experimentalFeatureSettings.isEnabled(Xp::Provenance) && !queryNAR.isNull(12)) + narInfo->provenance = Provenance::from_json_str_optional(queryNAR.getStr(12)); return {oValid, narInfo}; }); @@ -340,9 +344,10 @@ struct NarInfoDiskCacheImpl : NarInfoDiskCache narInfo && narInfo->fileHash)( narInfo ? narInfo->fileSize : 0, narInfo != 0 && narInfo->fileSize)(info->narHash.to_string( HashFormat::Nix32, true))(info->narSize)(concatStringsSep(" ", info->shortRefs()))( - info->deriver ? std::string(info->deriver->to_string()) : "", - (bool) info->deriver)(concatStringsSep(" ", Signature::toStrings(info->sigs)))( - renderContentAddress(info->ca))(time(nullptr)) + info->deriver ? std::string(info->deriver->to_string()) : "", (bool) info->deriver)( + concatStringsSep(" ", Signature::toStrings(info->sigs)))(renderContentAddress(info->ca))( + info->provenance ? 
info->provenance->to_json_str() : "", + experimentalFeatureSettings.isEnabled(Xp::Provenance) && info->provenance)(time(nullptr)) .exec(); } else { diff --git a/src/libstore/nar-info.cc b/src/libstore/nar-info.cc index b912467d8a83..0f225f22c7d3 100644 --- a/src/libstore/nar-info.cc +++ b/src/libstore/nar-info.cc @@ -3,6 +3,7 @@ #include "nix/store/store-api.hh" #include "nix/util/strings.hh" #include "nix/util/json-utils.hh" +#include "nix/util/provenance.hh" namespace nix { @@ -84,7 +85,8 @@ NarInfo::NarInfo(const StoreDirConfig & store, const std::string & s, const std: throw corrupt("extra CA"); // FIXME: allow blank ca or require skipping field? ca = ContentAddress::parseOpt(value); - } + } else if (name == "Provenance" && experimentalFeatureSettings.isEnabled(Xp::Provenance)) + provenance = Provenance::from_json_str(value); pos = eol + 1; line += 1; @@ -129,6 +131,9 @@ std::string NarInfo::to_string(const StoreDirConfig & store) const if (ca) res += "CA: " + renderContentAddress(*ca) + "\n"; + if (provenance && experimentalFeatureSettings.isEnabled(Xp::Provenance)) + res += "Provenance: " + provenance->to_json_str() + "\n"; + return res; } diff --git a/src/libstore/package.nix b/src/libstore/package.nix index 1f7b56f2238e..ecd5d0ae2e6f 100644 --- a/src/libstore/package.nix +++ b/src/libstore/package.nix @@ -4,7 +4,7 @@ mkMesonLibrary, unixtools, - darwin, + apple-sdk, nix-util, boost, @@ -15,6 +15,7 @@ nlohmann_json, sqlite, cmake, # for resolving aws-crt-cpp dep + wasmtime, busybox-sandbox-shell ? null, @@ -22,11 +23,13 @@ version, - embeddedSandboxShell ? stdenv.hostPlatform.isStatic, + embeddedSandboxShell ? stdenv.hostPlatform.isStatic && !stdenv.hostPlatform.isDarwin, withAWS ? # Default is this way because there have been issues building this dependency - (lib.meta.availableOn stdenv.hostPlatform aws-c-common), + (lib.meta.availableOn stdenv.hostPlatform aws-c-common) && !stdenv.hostPlatform.isStatic, + + enableWasm ? 
!stdenv.hostPlatform.isStatic, }: let @@ -34,15 +37,17 @@ let in mkMesonLibrary (finalAttrs: { - pname = "nix-store"; + pname = "determinate-nix-store"; inherit version; workDir = ./.; fileset = fileset.unions [ ../../nix-meson-build-support ./nix-meson-build-support + # FIXME: get rid of these symlinks. ../../.version ./.version + ../../.version-determinate ./meson.build ./meson.options ./include/nix/store/meson.build @@ -67,7 +72,8 @@ mkMesonLibrary (finalAttrs: { sqlite ] ++ lib.optional stdenv.hostPlatform.isLinux libseccomp - ++ lib.optional withAWS aws-crt-cpp; + ++ lib.optional withAWS aws-crt-cpp + ++ lib.optional enableWasm wasmtime; propagatedBuildInputs = [ nix-util @@ -78,6 +84,7 @@ mkMesonLibrary (finalAttrs: { (lib.mesonEnable "seccomp-sandboxing" stdenv.hostPlatform.isLinux) (lib.mesonBool "embedded-sandbox-shell" embeddedSandboxShell) (lib.mesonEnable "s3-aws-auth" withAWS) + (lib.mesonEnable "wasm" enableWasm) ] ++ lib.optionals stdenv.hostPlatform.isLinux [ (lib.mesonOption "sandbox-shell" "${busybox-sandbox-shell}/bin/busybox") diff --git a/src/libstore/path-info.cc b/src/libstore/path-info.cc index 1c631cf147fa..e1803e1014d0 100644 --- a/src/libstore/path-info.cc +++ b/src/libstore/path-info.cc @@ -5,6 +5,7 @@ #include "nix/util/json-utils.hh" #include "nix/util/comparator.hh" #include "nix/util/strings.hh" +#include "nix/util/provenance.hh" namespace nix { @@ -212,6 +213,9 @@ UnkeyedValidPathInfo::toJSON(const StoreDirConfig * store, bool includeImpureInf jsonObject["ultimate"] = ultimate; jsonObject["signatures"] = sigs; + + if (experimentalFeatureSettings.isEnabled(Xp::Provenance)) + jsonObject["provenance"] = provenance ? 
provenance->to_json() : nullptr; } return jsonObject; @@ -287,6 +291,12 @@ UnkeyedValidPathInfo UnkeyedValidPathInfo::fromJSON(const StoreDirConfig * store if (auto * rawSignatures = optionalValueAt(json, "signatures")) res.sigs = *rawSignatures; + if (experimentalFeatureSettings.isEnabled(Xp::Provenance)) { + auto prov = json.find("provenance"); + if (prov != json.end() && !prov->second.is_null()) + res.provenance = Provenance::from_json(prov->second); + } + return res; } diff --git a/src/libstore/provenance.cc b/src/libstore/provenance.cc new file mode 100644 index 000000000000..e08614a0e97d --- /dev/null +++ b/src/libstore/provenance.cc @@ -0,0 +1,84 @@ +#include "nix/store/provenance.hh" +#include "nix/util/json-utils.hh" + +#include + +namespace nix { + +static void checkProvenanceTagName(std::string_view name) +{ + static const std::regex tagNameRegex("^[A-Za-z_][A-Za-z0-9_+\\-]*$"); + if (!std::regex_match(name.begin(), name.end(), tagNameRegex)) + throw Error("tag name '%s' is invalid", name); +} + +BuildProvenance::BuildProvenance( + const StorePath & drvPath, + const OutputName & output, + std::optional buildHost, + StringMap tags, + std::string system, + std::shared_ptr next) + : drvPath(drvPath) + , output(output) + , buildHost(std::move(buildHost)) + , tags(std::move(tags)) + , system(std::move(system)) + , next(std::move(next)) +{ + for (const auto & [name, value] : this->tags) + checkProvenanceTagName(name); +} + +nlohmann::json BuildProvenance::to_json() const +{ + return { + {"type", "build"}, + {"drv", drvPath.to_string()}, + {"output", output}, + {"buildHost", buildHost}, + {"system", system}, + {"next", next ? 
next->to_json() : nlohmann::json(nullptr)}, + {"tags", tags}, + }; +} + +Provenance::Register registerBuildProvenance("build", [](nlohmann::json json) { + auto & obj = getObject(json); + std::shared_ptr next; + if (auto p = optionalValueAt(obj, "next"); p && !p->is_null()) + next = Provenance::from_json(*p); + std::optional buildHost; + if (auto p = optionalValueAt(obj, "buildHost")) + buildHost = p->get>(); + StringMap tags; + if (auto p = optionalValueAt(obj, "tags"); p && !p->is_null()) + tags = p->get(); + auto buildProv = make_ref( + StorePath(getString(valueAt(obj, "drv"))), + getString(valueAt(obj, "output")), + std::move(buildHost), + std::move(tags), + getString(valueAt(obj, "system")), + next); + return buildProv; +}); + +nlohmann::json CopiedProvenance::to_json() const +{ + return { + {"type", "copied"}, + {"from", from}, + {"next", next ? next->to_json() : nlohmann::json(nullptr)}, + }; +} + +Provenance::Register registerCopiedProvenance("copied", [](nlohmann::json json) { + auto & obj = getObject(json); + std::shared_ptr next; + if (auto p = optionalValueAt(obj, "next"); p && !p->is_null()) + next = Provenance::from_json(*p); + return make_ref(getString(valueAt(obj, "from")), next); +}); + +} // namespace nix diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc index 05dd5cf13a89..bf03b24fdf99 100644 --- a/src/libstore/remote-store.cc +++ b/src/libstore/remote-store.cc @@ -19,6 +19,7 @@ #include "nix/store/filetransfer.hh" #include "nix/util/signals.hh" #include "nix/util/socket.hh" +#include "nix/util/provenance.hh" #ifndef _WIN32 # include @@ -83,7 +84,10 @@ void RemoteStore::initConnection(Connection & conn) StringSink saved; TeeSource tee(conn.from, saved); try { - conn.protoVersion = WorkerProto::BasicClientConnection::handshake(conn.to, tee, WorkerProto::latest); + auto version = WorkerProto::latest; + if (!experimentalFeatureSettings.isEnabled(Xp::Provenance)) + 
version.features.erase(std::string(WorkerProto::featureProvenance)); + conn.protoVersion = WorkerProto::BasicClientConnection::handshake(conn.to, tee, version); if (conn.protoVersion.number < WorkerProto::minimum.number) throw Error("the Nix daemon version is too old"); } catch (SerialisationError & e) { @@ -321,7 +325,8 @@ ref RemoteStore::addCAToStore( ContentAddressMethod caMethod, HashAlgorithm hashAlgo, const StorePathSet & references, - RepairFlag repair) + RepairFlag repair, + std::shared_ptr provenance) { std::optional conn_(getConnection()); auto & conn = *conn_; @@ -331,6 +336,8 @@ ref RemoteStore::addCAToStore( conn->to << WorkerProto::Op::AddToStore << name << caMethod.renderWithAlgo(hashAlgo); WorkerProto::write(*this, *conn, references); conn->to << repair; + if (conn->protoVersion.features.contains(WorkerProto::featureProvenance)) + conn->to << (provenance ? provenance->to_json_str() : ""); // The dump source may invoke the store, so we need to make some room. connections->incCapacity(); @@ -408,7 +415,8 @@ StorePath RemoteStore::addToStoreFromDump( ContentAddressMethod hashMethod, HashAlgorithm hashAlgo, const StorePathSet & references, - RepairFlag repair) + RepairFlag repair, + std::shared_ptr provenance) { FileSerialisationMethod fsm; switch (hashMethod.getFileIngestionMethod()) { @@ -427,7 +435,7 @@ StorePath RemoteStore::addToStoreFromDump( } if (fsm != dumpMethod) unsupported("RemoteStore::addToStoreFromDump doesn't support this `dumpMethod` `hashMethod` combination"); - auto storePath = addCAToStore(dump, name, hashMethod, hashAlgo, references, repair)->path; + auto storePath = addCAToStore(dump, name, hashMethod, hashAlgo, references, repair, provenance)->path; invalidatePathInfoCacheFor(storePath); return storePath; } @@ -443,7 +451,10 @@ void RemoteStore::addToStore(const ValidPathInfo & info, Source & source, Repair WorkerProto::write(*this, *conn, info.references); conn->to << info.registrationTime << info.narSize << info.ultimate; 
WorkerProto::write(*this, *conn, info.sigs); - conn->to << renderContentAddress(info.ca) << repair << !checkSigs; + conn->to << renderContentAddress(info.ca); + if (conn->protoVersion.features.contains(WorkerProto::featureProvenance)) + conn->to << (info.provenance ? info.provenance->to_json_str() : ""); + conn->to << repair << !checkSigs; if (conn->protoVersion >= WorkerProto::Version{.number = {1, 23}}) { conn.withFramedSink([&](Sink & sink) { copyNAR(source, sink); }); @@ -458,6 +469,13 @@ void RemoteStore::addToStore(const ValidPathInfo & info, Source & source, Repair void RemoteStore::addMultipleToStore( PathsSource && pathsToCopy, Activity & act, RepairFlag repair, CheckSigsFlag checkSigs) { + if (getConnection()->protoVersion.number < WorkerProto::Version::Number{1, 32}) { + Store::addMultipleToStore(std::move(pathsToCopy), act, repair, checkSigs); + return; + } + + auto conn(getConnection()); + // `addMultipleToStore` is single threaded size_t bytesExpected = 0; for (auto & [pathInfo, _] : pathsToCopy) { @@ -478,7 +496,9 @@ void RemoteStore::addMultipleToStore( *this, WorkerProto::WriteConn{ .to = sink, - .version = {.number = {.major = 1, .minor = 16}}, + .version = conn->protoVersion.features.contains(WorkerProto::featureVersionedAddToStoreMultiple) + ? 
conn->protoVersion + : WorkerProto::Version{.number = {.major = 1, .minor = 16}}, }, pathInfo); pathSource->drainInto(sink); @@ -486,17 +506,8 @@ void RemoteStore::addMultipleToStore( } }); - addMultipleToStore(*source, repair, checkSigs); -} - -void RemoteStore::addMultipleToStore(Source & source, RepairFlag repair, CheckSigsFlag checkSigs) -{ - if (getConnection()->protoVersion >= WorkerProto::Version{.number = {1, 32}}) { - auto conn(getConnection()); - conn->to << WorkerProto::Op::AddMultipleToStore << repair << !checkSigs; - conn.withFramedSink([&](Sink & sink) { source.drainInto(sink); }); - } else - Store::addMultipleToStore(source, repair, checkSigs); + conn->to << WorkerProto::Op::AddMultipleToStore << repair << !checkSigs; + conn.withFramedSink([&](Sink & sink) { source->drainInto(sink); }); } void RemoteStore::registerDrvOutput(const Realisation & info) @@ -780,6 +791,16 @@ void RemoteStore::addBuildLog(const StorePath & drvPath, std::string_view log) readInt(conn->from); } +std::vector RemoteStore::queryActiveBuilds() +{ + auto conn(getConnection()); + if (!conn->protoVersion.features.count(WorkerProto::featureQueryActiveBuilds)) + throw Error("remote store does not support querying active builds"); + conn->to << WorkerProto::Op::QueryActiveBuilds; + conn.processStderr(); + return nlohmann::json::parse(readString(conn->from)).get>(); +} + std::optional RemoteStore::getVersion() { auto conn(getConnection()); diff --git a/src/libstore/restricted-store.cc b/src/libstore/restricted-store.cc index 5248a313c240..06bf5746d7fb 100644 --- a/src/libstore/restricted-store.cc +++ b/src/libstore/restricted-store.cc @@ -98,7 +98,8 @@ struct RestrictedStore : public virtual IndirectRootStore, public virtual GcStor ContentAddressMethod hashMethod, HashAlgorithm hashAlgo, const StorePathSet & references, - RepairFlag repair) override; + RepairFlag repair, + std::shared_ptr provenance) override; void narFromPath(const StorePath & path, Sink & sink) override; @@ -215,9 
+216,10 @@ StorePath RestrictedStore::addToStoreFromDump( ContentAddressMethod hashMethod, HashAlgorithm hashAlgo, const StorePathSet & references, - RepairFlag repair) + RepairFlag repair, + std::shared_ptr provenance) { - auto path = next->addToStoreFromDump(dump, name, dumpMethod, hashMethod, hashAlgo, references, repair); + auto path = next->addToStoreFromDump(dump, name, dumpMethod, hashMethod, hashAlgo, references, repair, provenance); goal.addDependency(path); return path; } diff --git a/src/libstore/ssh-store.cc b/src/libstore/ssh-store.cc index 22a023cf5695..d0da581fc0f0 100644 --- a/src/libstore/ssh-store.cc +++ b/src/libstore/ssh-store.cc @@ -54,6 +54,11 @@ struct alignas(8) /* Work around ASAN failures on i686-linux. */ { } + bool includeInProvenance() override + { + return true; + } + // FIXME extend daemon protocol, move implementation to RemoteStore std::optional getBuildLogExact(const StorePath & path) override { diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc index 5b5d41623893..57952b1dfd4a 100644 --- a/src/libstore/store-api.cc +++ b/src/libstore/store-api.cc @@ -14,12 +14,10 @@ #include "nix/util/callback.hh" #include "nix/util/git.hh" #include "nix/util/posix-source-accessor.hh" -// FIXME this should not be here, see TODO below on -// `addMultipleToStore`. 
-#include "nix/store/worker-protocol.hh" #include "nix/util/signals.hh" #include "nix/util/environment-variables.hh" #include "nix/util/file-system.hh" +#include "nix/store/provenance.hh" #include "store-config-private.hh" @@ -28,8 +26,6 @@ #include "nix/util/strings.hh" -using json = nlohmann::json; - namespace nix { std::string StoreConfigBase::getDefaultNixStoreDir() @@ -140,9 +136,14 @@ StorePath Store::addToStore( std::optional storePath; auto sink = sourceToSink([&](Source & source) { LengthSource lengthSource(source); - storePath = addToStoreFromDump(lengthSource, name, fsm, method, hashAlgo, references, repair); - if (settings.warnLargePathThreshold && lengthSource.total >= settings.warnLargePathThreshold) - warn("copied large path '%s' to the store (%s)", path, renderSize(lengthSource.total)); + storePath = + addToStoreFromDump(lengthSource, name, fsm, method, hashAlgo, references, repair, path.getProvenance()); + if (settings.warnLargePathThreshold && lengthSource.total >= settings.warnLargePathThreshold) { + static bool failOnLargePath = getEnv("_NIX_TEST_FAIL_ON_LARGE_PATH").value_or("") == "1"; + if (failOnLargePath) + throw Error("doesn't copy large path '%s' to the store (%d)", path, renderSize(lengthSource.total)); + warn("copied large path '%s' to the store (%d)", path, renderSize(lengthSource.total)); + } }); dumpPath(path, *sink, fsm, filter); sink->finish(); @@ -220,23 +221,6 @@ void Store::addMultipleToStore(PathsSource && pathsToCopy, Activity & act, Repai }); } -void Store::addMultipleToStore(Source & source, RepairFlag repair, CheckSigsFlag checkSigs) -{ - auto expected = readNum(source); - for (uint64_t i = 0; i < expected; ++i) { - // FIXME we should not be using the worker protocol here, let - // alone the worker protocol with a hard-coded version! 
- auto info = WorkerProto::Serialise::read( - *this, - WorkerProto::ReadConn{ - .from = source, - .version = {.number = {.major = 1, .minor = 16}}, - }); - info.ultimate = false; - addToStore(info, source, repair, checkSigs); - } -} - /* The aim of this function is to compute in one pass the correct ValidPathInfo for the files that we are trying to add to the store. To accomplish that in one @@ -322,6 +306,7 @@ ValidPathInfo Store::addToStoreSlow( }), narHash); info.narSize = narSize; + info.provenance = srcPath.getProvenance(); if (!isValidPath(info.path)) { auto source = sinkToSource([&](Sink & scratchpadSink) { srcPath.dumpPath(scratchpadSink); }); @@ -599,6 +584,23 @@ ref Store::queryPathInfo(const StorePath & storePath) return promise.get_future().get(); } +std::shared_ptr Store::maybeQueryPathInfo(const StorePath & storePath) +{ + std::promise> promise; + + queryPathInfo(storePath, {[&](std::future> result) { + try { + promise.set_value(result.get()); + } catch (InvalidPath &) { + promise.set_value(nullptr); + } catch (...) { + promise.set_exception(std::current_exception()); + } + }}); + + return promise.get_future().get(); +} + static bool goodStorePath(const StorePath & expected, const StorePath & actual) { return expected.hashPart() == actual.hashPart() @@ -920,13 +922,26 @@ makeCopyPathMessage(const StoreConfig & srcCfg, const StoreConfig & dstCfg, std: "copying path '%s' from '%s' to '%s'", storePath, srcCfg.getHumanReadableURI(), dstCfg.getHumanReadableURI()); } -void copyStorePath( +/** + * Wrap upstream provenance in a "copied" provenance record to record + * where the path was copied from. But uninformative origins like + * LocalStore are omitted. 
+ */ +static std::shared_ptr +addCopiedProvenance(std::shared_ptr provenance, Store & srcStore) +{ + if (!srcStore.includeInProvenance()) + return provenance; + return std::make_shared(srcStore.config.getReference().render(false), provenance); +} + +std::shared_ptr copyStorePath( Store & srcStore, Store & dstStore, const StorePath & storePath, RepairFlag repair, CheckSigsFlag checkSigs) { /* Bail out early (before starting a download from srcStore) if dstStore already has this path. */ if (!repair && dstStore.isValidPath(storePath)) - return; + return nullptr; const auto & srcCfg = srcStore.config; const auto & dstCfg = dstStore.config; @@ -939,25 +954,25 @@ void copyStorePath( {storePathS, srcCfg.getHumanReadableURI(), dstCfg.getHumanReadableURI()}); PushActivity pact(act.id); - auto info = srcStore.queryPathInfo(storePath); + auto srcInfo = srcStore.queryPathInfo(storePath); + auto info = std::make_shared(*srcInfo); uint64_t total = 0; // recompute store path on the chance dstStore does it differently if (info->ca && info->references.empty()) { - auto info2 = make_ref(*info); - info2->path = + info->path = dstStore.makeFixedOutputPathFromCA(info->path.name(), info->contentAddressWithReferences().value()); if (dstStore.storeDir == srcStore.storeDir) - assert(info->path == info2->path); - info = info2; + assert(info->path == srcInfo->path); } - if (info->ultimate) { - auto info2 = make_ref(*info); - info2->ultimate = false; - info = info2; - } + info->ultimate = false; + + info->provenance = addCopiedProvenance(info->provenance, srcStore); + + if (getEnv("_NIX_TEST_CONCURRENT_SUBSTITUTION")) + std::this_thread::sleep_for(std::chrono::seconds(1)); auto source = sinkToSource( [&](Sink & sink) { @@ -976,6 +991,8 @@ void copyStorePath( }); dstStore.addToStore(*info, *source, repair, checkSigs); + + return info; } std::map copyPaths( @@ -1069,6 +1086,7 @@ std::map copyPaths( ValidPathInfo infoForDst = *info; infoForDst.path = storePathForDst; + infoForDst.provenance = 
addCopiedProvenance(info->provenance, srcStore); auto source = sinkToSource([&, narSize = info->narSize](Sink & sink) { // We can reasonably assume that the copy will happen whenever we diff --git a/src/libstore/unix/build/derivation-builder.cc b/src/libstore/unix/build/derivation-builder.cc index fc877dd56ec3..44ee4f87bb11 100644 --- a/src/libstore/unix/build/derivation-builder.cc +++ b/src/libstore/unix/build/derivation-builder.cc @@ -2,6 +2,7 @@ #include "nix/util/file-system-at.hh" #include "nix/util/file-system.hh" #include "nix/store/local-store.hh" +#include "nix/store/active-builds.hh" #include "nix/util/processes.hh" #include "nix/store/builtins.hh" #include "nix/store/path-references.hh" @@ -20,6 +21,9 @@ #include "nix/store/build/derivation-env-desugar.hh" #include "nix/util/terminal.hh" #include "nix/store/filetransfer.hh" +#include "nix/store/provenance.hh" + +#include #include #include @@ -117,6 +121,11 @@ class DerivationBuilderImpl : public DerivationBuilder, public DerivationBuilder */ Pid pid; + /** + * Handles to track active builds for `nix ps`. + */ + std::optional activeBuildHandle; + LocalStore & store; const LocalSettings & localSettings = store.config->getLocalSettings(); @@ -274,6 +283,11 @@ class DerivationBuilderImpl : public DerivationBuilder, public DerivationBuilder return acquireUserLock(settings.nixStateDir, localSettings, 1, false); } + /** + * Construct the `ActiveBuild` object for `ActiveBuildsTracker`. + */ + virtual ActiveBuild getActiveBuild(); + /** * Return the paths that should be made available in the sandbox. * This includes: @@ -538,6 +552,8 @@ bool DerivationBuilderImpl::killChild() pid.wait(); + activeBuildHandle.reset(); + miscMethods->childTerminated(); } return ret; @@ -572,6 +588,8 @@ SingleDrvOutputs DerivationBuilderImpl::unprepareBuild() root. */ killSandbox(true); + activeBuildHandle.reset(); + /* Terminate the recursive Nix daemon. 
*/ stopDaemon(); @@ -879,17 +897,44 @@ std::optional DerivationBuilderImpl::startBuild() pid.setSeparatePG(true); + /* Make the build visible to `nix ps`. */ + if (auto tracker = dynamic_cast(&store)) + activeBuildHandle.emplace(tracker->buildStarted(getActiveBuild())); + processSandboxSetupMessages(); return builderOut.get(); } +ActiveBuild DerivationBuilderImpl::getActiveBuild() +{ + return { + .nixPid = getpid(), + .clientPid = std::nullopt, // FIXME + .clientUid = std::nullopt, // FIXME + .mainPid = pid, + .mainUser = UserInfo::fromUid(buildUser ? buildUser->getUID() : getuid()), + .startTime = buildResult.startTime, + .derivation = drvPath, + }; +} + PathsInChroot DerivationBuilderImpl::getPathsInSandbox() { /* Allow a user-configurable set of directories from the host file system. */ PathsInChroot pathsInChroot = defaultPathsInChroot; + for (auto & p : pathsInChroot) + if (!p.second.optional +#if HAVE_EMBEDDED_SANDBOX_SHELL + && p.second.source != SANDBOX_SHELL +#endif + && !maybeLstat(p.second.source)) + throw SysError( + "path %s is configured as part of the `sandbox-paths` option, but is inaccessible", + PathFmt(p.second.source)); + if (hasPrefix(store.storeDir, tmpDirInSandbox().native())) { throw Error("`sandbox-build-dir` must not contain the storeDir"); } @@ -930,8 +975,28 @@ PathsInChroot DerivationBuilderImpl::getPathsInSandbox() enum BuildHookState { stBegin, stExtraChrootDirs }; + nlohmann::json drvJson = drv; + + auto [tmpFd, drvJsonPath] = createTempFile("nix-drv-json"); + writeFile(drvJsonPath, drvJson.dump()); + AutoDelete drvJsonFile(drvJsonPath, false); + + auto hookEnv = getEnv(); + static_assert(expectedJsonVersionDerivation == 4); + hookEnv["NIX_DERIVATION_V4"] = drvJsonPath; + + auto [hookStatus, lines] = runProgram( + RunOptions{ + .program = localSettings.preBuildHook.get(), + .lookupPath = false, + .args = getPreBuildHookArgs(), + .environment = std::move(hookEnv), + }); + if (!statusOk(hookStatus)) + throw ExecError( + hookStatus, 
"pre-build hook '%1%' %2%", localSettings.preBuildHook, statusToString(hookStatus)); + auto state = stBegin; - auto lines = runProgram(localSettings.preBuildHook.get(), false, getPreBuildHookArgs()); auto lastPos = std::string::size_type{0}; for (auto nlPos = lines.find('\n'); nlPos != std::string::npos; nlPos = lines.find('\n', lastPos)) { auto line = lines.substr(lastPos, nlPos - lastPos); @@ -1041,7 +1106,7 @@ void DerivationBuilderImpl::processSandboxSetupMessages() "while waiting for the build environment for '%s' to initialize (%s, previous messages: %s)", store.printStorePath(drvPath), statusToString(status), - concatStringsSep("|", msgs)); + concatStringsSep("\n", msgs)); throw; } }(); @@ -1931,6 +1996,14 @@ SingleDrvOutputs DerivationBuilderImpl::registerOutputs() newInfo.deriver = drvPath; newInfo.ultimate = true; + if (experimentalFeatureSettings.isEnabled(Xp::Provenance)) + newInfo.provenance = std::make_shared( + drvPath, + outputName, + settings.getWorkerSettings().getHostName(), + settings.getWorkerSettings().buildProvenanceTags.get(), + drv.platform, + drvProvenance); store.signPathInfo(newInfo); finish(newInfo.path); @@ -1941,8 +2014,8 @@ SingleDrvOutputs DerivationBuilderImpl::registerOutputs() This is also good so that if a fixed-output produces the wrong path, we still store the result (just don't consider - the derivation sucessful, so if someone fixes the problem by - just changing the wanted hash, the redownload (or whateer + the derivation successful, so if someone fixes the problem by + just changing the wanted hash, the redownload (or whatever possibly quite slow thing it was) doesn't have to be done again. */ if (newInfo.ca) @@ -1957,7 +2030,7 @@ SingleDrvOutputs DerivationBuilderImpl::registerOutputs() /* Apply output checks. This includes checking of the wanted vs got hash of fixed-outputs. 
*/ - checkOutputs(store, drvPath, drv.outputs, drvOptions.outputChecks, infos); + checkOutputs(store, drvPath, drv.outputs, drvOptions.outputChecks, infos, *act); if (buildMode == bmCheck) { return {}; @@ -2065,6 +2138,10 @@ StorePath DerivationBuilderImpl::makeFallbackPath(const StorePath & path) #include "darwin-derivation-builder.cc" #include "external-derivation-builder.cc" +#if NIX_USE_WASMTIME +# include "wasi-derivation-builder.cc" +#endif + namespace nix { void DerivationBuilderDeleter::operator()(DerivationBuilder * builder) noexcept @@ -2109,6 +2186,11 @@ std::unique_ptr makeDerivationBuild useSandbox = params.drv.type().isSandboxed() && !params.drvOptions.noChroot; } +#if NIX_USE_WASMTIME + if (params.drv.platform == "wasm32-wasip1") + return DerivationBuilderUnique(new WasiDerivationBuilder(store, std::move(miscMethods), std::move(params))); +#endif + if (store.storeDir != store.config->realStoreDir.get()) { #ifdef __linux__ useSandbox = true; diff --git a/src/libstore/unix/build/linux-derivation-builder.cc b/src/libstore/unix/build/linux-derivation-builder.cc index 476baabe24ad..2fa8af8e1231 100644 --- a/src/libstore/unix/build/linux-derivation-builder.cc +++ b/src/libstore/unix/build/linux-derivation-builder.cc @@ -473,10 +473,10 @@ struct ChrootLinuxDerivationBuilder : ChrootDerivationBuilder, LinuxDerivationBu sendPid.writeSide.close(); - if (helper.wait() != 0) { + if (auto status = helper.wait(); !statusOk(status)) { processSandboxSetupMessages(); // Only reached if the child process didn't send an exception. 
- throw Error("unable to start build process"); + throw Error("unable to start build process: %s", statusToString(status)); } userNamespaceSync.readSide = -1; @@ -835,6 +835,9 @@ struct ChrootLinuxDerivationBuilder : ChrootDerivationBuilder, LinuxDerivationBu void addDependencyImpl(const StorePath & path) override { + if (isAllowed(path)) + return; + auto [source, target] = ChrootDerivationBuilder::addDependencyPrep(path); /* Bind-mount the path into the sandbox. This requires @@ -854,8 +857,15 @@ struct ChrootLinuxDerivationBuilder : ChrootDerivationBuilder, LinuxDerivationBu })); int status = child.wait(); - if (status != 0) - throw Error("could not add path '%s' to sandbox", store.printStorePath(path)); + if (!statusOk(status)) + throw Error("could not add path '%s' to sandbox: %s", store.printStorePath(path), statusToString(status)); + } + + ActiveBuild getActiveBuild() override + { + auto build = DerivationBuilderImpl::getActiveBuild(); + build.cgroup = cgroup; + return build; } }; diff --git a/src/libstore/unix/build/wasi-derivation-builder.cc b/src/libstore/unix/build/wasi-derivation-builder.cc new file mode 100644 index 000000000000..126e95944622 --- /dev/null +++ b/src/libstore/unix/build/wasi-derivation-builder.cc @@ -0,0 +1,80 @@ +#include + +namespace nix { + +// FIXME: cut&paste +template +T unwrap(wasmtime::Result && res) +{ + if (res) + return res.ok(); + throw Error(res.err().message()); +} + +// FIXME: cut&paste +static std::span string2span(std::string_view s) +{ + return std::span((uint8_t *) s.data(), s.size()); +} + +struct WasiDerivationBuilder : DerivationBuilderImpl +{ + WasiDerivationBuilder( + LocalStore & store, std::unique_ptr miscMethods, DerivationBuilderParams params) + : DerivationBuilderImpl(store, std::move(miscMethods), std::move(params)) + { + experimentalFeatureSettings.require(Xp::WasmDerivations); + } + + void execBuilder(const Strings & args, const Strings & envStrs) override + { + using namespace wasmtime; + + Engine engine; 
+ Linker linker(engine); + unwrap(linker.define_wasi()); + + WasiConfig wasiConfig; + wasiConfig.inherit_stdin(); + wasiConfig.inherit_stdout(); + wasiConfig.inherit_stderr(); + wasiConfig.argv(std::vector(args.begin(), args.end())); + { + std::vector> env2; + for (auto & [k, v] : env) + env2.emplace_back(k, rewriteStrings(v, inputRewrites)); + wasiConfig.env(env2); + } + if (!wasiConfig.preopen_dir( + store.config->realStoreDir.get(), + store.storeDir, + WASMTIME_WASI_DIR_PERMS_READ | WASMTIME_WASI_DIR_PERMS_WRITE, + WASMTIME_WASI_FILE_PERMS_READ | WASMTIME_WASI_FILE_PERMS_WRITE)) + throw Error("cannot add store directory to WASI config"); + if (!wasiConfig.preopen_dir( + tmpDir, + tmpDirInSandbox(), + WASMTIME_WASI_DIR_PERMS_READ | WASMTIME_WASI_DIR_PERMS_WRITE, + WASMTIME_WASI_FILE_PERMS_READ | WASMTIME_WASI_FILE_PERMS_WRITE)) + throw Error("cannot add temporary directory to WASI config"); + + auto module = unwrap(Module::compile(engine, string2span(readFile(realPathInHost(drv.builder))))); + wasmtime::Store wasmStore(engine); + unwrap(wasmStore.context().set_wasi(std::move(wasiConfig))); + auto instance = unwrap(linker.instantiate(wasmStore, module)); + + auto startName = "_start"; + auto ext = instance.get(wasmStore, startName); + if (!ext) + throw Error("Wasm module '%s' does not export function '%s'", drv.builder, startName); + auto fun = std::get_if(&*ext); + if (!fun) + throw Error("export '%s' of Wasm module '%s' is not a function", startName, drv.builder); + + unwrap(fun->call(wasmStore.context(), {})); + + _exit(0); + } +}; + +} // namespace nix diff --git a/src/libstore/worker-protocol.cc b/src/libstore/worker-protocol.cc index 49ab4c36b1ed..3c7acbfb4784 100644 --- a/src/libstore/worker-protocol.cc +++ b/src/libstore/worker-protocol.cc @@ -8,6 +8,7 @@ #include "nix/store/worker-protocol-impl.hh" #include "nix/util/archive.hh" #include "nix/store/path-info.hh" +#include "nix/util/provenance.hh" #include #include @@ -20,6 +21,12 @@ const 
WorkerProto::Version WorkerProto::latest = { .major = 1, .minor = 38, }, + .features = + { + std::string{WorkerProto::featureQueryActiveBuilds}, + std::string{WorkerProto::featureProvenance}, + std::string{WorkerProto::featureVersionedAddToStoreMultiple}, + }, }; const WorkerProto::Version WorkerProto::minimum = { @@ -346,6 +353,8 @@ UnkeyedValidPathInfo WorkerProto::Serialise::read(const St info.sigs = WorkerProto::Serialise>::read(store, conn); info.ca = ContentAddress::parseOpt(readString(conn.from)); } + if (conn.version.features.contains(WorkerProto::featureProvenance)) + info.provenance = Provenance::from_json_str_optional(readString(conn.from)); return info; } @@ -361,6 +370,8 @@ void WorkerProto::Serialise::write( WorkerProto::write(store, conn, pathInfo.sigs); conn.to << renderContentAddress(pathInfo.ca); } + if (conn.version.features.contains(WorkerProto::featureProvenance)) + conn.to << (pathInfo.provenance ? pathInfo.provenance->to_json_str() : ""); } WorkerProto::ClientHandshakeInfo diff --git a/src/libutil-c/meson.build b/src/libutil-c/meson.build index 1806dbb6f9a0..93817efd726b 100644 --- a/src/libutil-c/meson.build +++ b/src/libutil-c/meson.build @@ -57,7 +57,7 @@ this_library = library( dependencies : deps_public + deps_private + deps_other, include_directories : include_dirs, link_args : linker_export_flags, - prelink : true, # For C++ static initializers + prelink : prelink, # For C++ static initializers install : true, ) diff --git a/src/libutil-c/nix_api_util.cc b/src/libutil-c/nix_api_util.cc index f28a9168e30b..23eafa8e8ff3 100644 --- a/src/libutil-c/nix_api_util.cc +++ b/src/libutil-c/nix_api_util.cc @@ -2,6 +2,7 @@ #include "nix/util/config-global.hh" #include "nix/util/error.hh" #include "nix_api_util_internal.h" +#include "nix/util/signals.hh" #include "nix/util/util.hh" #include @@ -111,6 +112,9 @@ nix_err nix_libutil_init(nix_c_context * context) context->last_err_code = NIX_OK; try { nix::initLibUtil(); +#ifndef _WIN32 + 
nix::unix::startSignalHandlerThread(); +#endif return NIX_OK; } NIXC_CATCH_ERRS diff --git a/src/libutil-c/package.nix b/src/libutil-c/package.nix index f26f57775d4f..a1605bf5bb85 100644 --- a/src/libutil-c/package.nix +++ b/src/libutil-c/package.nix @@ -14,7 +14,7 @@ let in mkMesonLibrary (finalAttrs: { - pname = "nix-util-c"; + pname = "determinate-nix-util-c"; inherit version; workDir = ./.; diff --git a/src/libutil-test-support/meson.build b/src/libutil-test-support/meson.build index 64231107eb6b..f9254b616725 100644 --- a/src/libutil-test-support/meson.build +++ b/src/libutil-test-support/meson.build @@ -47,7 +47,7 @@ this_library = library( # TODO: Remove `-lrapidcheck` when https://github.com/emil-e/rapidcheck/pull/326 # is available. See also ../libutil/build.meson link_args : linker_export_flags + [ '-lrapidcheck' ], - prelink : true, # For C++ static initializers + prelink : prelink, # For C++ static initializers install : true, ) diff --git a/src/libutil-test-support/package.nix b/src/libutil-test-support/package.nix index f8e92c271137..40ff65d61357 100644 --- a/src/libutil-test-support/package.nix +++ b/src/libutil-test-support/package.nix @@ -17,7 +17,7 @@ let in mkMesonLibrary (finalAttrs: { - pname = "nix-util-test-support"; + pname = "determinate-nix-util-test-support"; inherit version; workDir = ./.; diff --git a/src/libutil-tests/config.cc b/src/libutil-tests/config.cc index 6e90dcf11bbe..11dcdea81cc8 100644 --- a/src/libutil-tests/config.cc +++ b/src/libutil-tests/config.cc @@ -218,7 +218,7 @@ TEST(Config, toJSONOnNonEmptyConfigWithExperimentalSetting) "description", {}, true, - Xp::Flakes, + Xp::CaDerivations, }; setting.assign("value"); @@ -231,7 +231,7 @@ TEST(Config, toJSONOnNonEmptyConfigWithExperimentalSetting) "description": "description\n", "documentDefault": true, "value": "value", - "experimentalFeature": "flakes" + "experimentalFeature": "ca-derivations" } })#"_json); } diff --git a/src/libutil-tests/cxa-throw.cc 
b/src/libutil-tests/cxa-throw.cc new file mode 100644 index 000000000000..db002bc2172f --- /dev/null +++ b/src/libutil-tests/cxa-throw.cc @@ -0,0 +1,35 @@ +#include + +#include +#include + +TEST(CxaThrow, catchesLogicErrorFromStdlib) +{ + const char * volatile p = nullptr; + ASSERT_DEATH({ std::string s(p); }, ""); +} + +TEST(CxaThrow, catchesLogicError) +{ + ASSERT_DEATH({ throw std::logic_error("test"); }, ""); +} + +TEST(CxaThrow, catchesOutOfRange) +{ + ASSERT_DEATH({ throw std::out_of_range("test"); }, ""); +} + +TEST(CxaThrow, catchesInvalidArgument) +{ + ASSERT_DEATH({ throw std::invalid_argument("test"); }, ""); +} + +TEST(CxaThrow, catchesDomainError) +{ + ASSERT_DEATH({ throw std::domain_error("test"); }, ""); +} + +TEST(CxaThrow, catchesLengthError) +{ + ASSERT_DEATH({ throw std::length_error("test"); }, ""); +} diff --git a/src/libutil-tests/fun.cc b/src/libutil-tests/fun.cc index e763ac018e7f..166a181d0670 100644 --- a/src/libutil-tests/fun.cc +++ b/src/libutil-tests/fun.cc @@ -2,6 +2,7 @@ #include #include "nix/util/fun.hh" +#include "util-tests-config.hh" namespace nix { @@ -39,19 +40,31 @@ TEST(fun, moveConstructFromStdFunction) TEST(fun, rejectsEmptyStdFunction) { std::function empty; +#if HAVE_CXA_THROW + ASSERT_DEATH((fun{empty}), "invalid_argument"); +#else EXPECT_THROW((fun{empty}), std::invalid_argument); +#endif } TEST(fun, rejectsEmptyStdFunctionMove) { std::function empty; +#if HAVE_CXA_THROW + ASSERT_DEATH((fun{std::move(empty)}), "invalid_argument"); +#else EXPECT_THROW((fun{std::move(empty)}), std::invalid_argument); +#endif } TEST(fun, rejectsNullFunctionPointer) { int (*nullFp)(int) = nullptr; +#if HAVE_CXA_THROW + ASSERT_DEATH((fun{nullFp}), "invalid_argument"); +#else EXPECT_THROW((fun{nullFp}), std::invalid_argument); +#endif } TEST(fun, nullptrDeletedAtCompileTime) diff --git a/src/libutil-tests/meson.build b/src/libutil-tests/meson.build index bcc9a607a179..5938aad02f0b 100644 --- a/src/libutil-tests/meson.build +++ 
b/src/libutil-tests/meson.build @@ -36,16 +36,17 @@ deps_private += gtest gmock = dependency('gmock') deps_private += gmock +subdir('nix-meson-build-support/common') + configdata = configuration_data() configdata.set_quoted('PACKAGE_VERSION', meson.project_version()) +configdata.set('HAVE_CXA_THROW', have_cxa_throw.to_int()) config_priv_h = configure_file( configuration : configdata, output : 'util-tests-config.hh', ) -subdir('nix-meson-build-support/common') - sources = files( 'alignment.cc', 'archive.cc', @@ -91,6 +92,10 @@ sources = files( 'xml-writer.cc', ) +if have_cxa_throw + sources += files('cxa-throw.cc') +endif + if host_machine.system() != 'windows' subdir('unix') endif diff --git a/src/libutil-tests/unix/file-system-at.cc b/src/libutil-tests/unix/file-system-at.cc index 609c4e2aa1d1..b43c2c1c2c5a 100644 --- a/src/libutil-tests/unix/file-system-at.cc +++ b/src/libutil-tests/unix/file-system-at.cc @@ -96,22 +96,22 @@ TEST(fchmodatTryNoFollow, fallbackWithoutProc) Pid pid = startProcess( [&] { if (unshare(CLONE_NEWNS) == -1) - _exit(1); + _exit(2); if (mount(0, "/", 0, MS_PRIVATE | MS_REC, 0) == -1) - _exit(1); + _exit(2); if (mount("tmpfs", "/proc", "tmpfs", 0, 0) == -1) - _exit(1); + _exit(2); auto dirFd = openDirectory(tmpDir); if (!dirFd) - exit(1); + _exit(3); try { fchmodatTryNoFollow(dirFd.get(), CanonPath("file"), 0600); } catch (SysError & e) { - _exit(1); + _exit(4); } try { @@ -121,12 +121,15 @@ TEST(fchmodatTryNoFollow, fallbackWithoutProc) _exit(0); /* Success. */ } - _exit(1); /* Didn't throw the expected exception. */ + _exit(5); /* Didn't throw the expected exception. 
*/ }, {.cloneFlags = CLONE_NEWUSER}); int status = pid.wait(); - ASSERT_TRUE(statusOk(status)); + EXPECT_TRUE(WIFEXITED(status)); + if (WEXITSTATUS(status) == 2) + GTEST_SKIP() << "Could not mount, system may be misconfigured"; + EXPECT_EQ(WEXITSTATUS(status), 0); struct ::stat st; ASSERT_EQ(stat((tmpDir / "file").c_str(), &st), 0); diff --git a/src/libutil/args.cc b/src/libutil/args.cc index c025f119f725..269a2ce7c7d8 100644 --- a/src/libutil/args.cc +++ b/src/libutil/args.cc @@ -513,7 +513,7 @@ void Args::checkArgs() { for (auto & [name, flag] : longFlags) { if (flag->required && flag->timesUsed == 0) - throw UsageError("required argument '--%s' is missing", name); + throw UsageError("required argument '%s' is missing", "--" + name); } } @@ -607,7 +607,7 @@ Strings argvToStrings(int argc, char ** argv) std::optional Command::experimentalFeature() { - return {Xp::NixCommand}; + return {}; } MultiCommand::MultiCommand(std::string_view commandName, const Commands & commands_) @@ -632,7 +632,7 @@ MultiCommand::MultiCommand(std::string_view commandName, const Commands & comman }}, .completer = {[&](AddCompletions & completions, size_t, std::string_view prefix) { for (auto & [name, command] : commands) - if (hasPrefix(name, prefix)) + if (hasPrefix(name, prefix) && !hasPrefix(name, "__")) completions.add(name); }}}); @@ -671,6 +671,8 @@ nlohmann::json MultiCommand::toJSON() auto command = commandFun(); auto j = command->toJSON(); auto cat = nlohmann::json::object(); + if (command->category() == Command::catUndocumented) + continue; cat["id"] = command->category(); cat["description"] = trim(categories[command->category()]); cat["experimental-feature"] = command->experimentalFeature(); diff --git a/src/libutil/configuration.cc b/src/libutil/configuration.cc index 297d4221f18e..d99d362c4a55 100644 --- a/src/libutil/configuration.cc +++ b/src/libutil/configuration.cc @@ -421,11 +421,11 @@ std::set BaseSetting>::parse( { std::set res; for (auto & s : tokenizeString(str)) { 
- if (auto thisXpFeature = parseExperimentalFeature(s); thisXpFeature) { + if (auto thisXpFeature = parseExperimentalFeature(s)) res.insert(thisXpFeature.value()); - if (thisXpFeature.value() == Xp::Flakes) - res.insert(Xp::FetchTree); - } else if (s == "no-url-literals") + else if (stabilizedFeatures.count(s)) + debug("experimental feature '%s' is now stable", s); + else if (s == "no-url-literals") warn( "experimental feature '%s' has been stabilized and renamed; use 'lint-url-literals = fatal' setting instead", s); diff --git a/src/libutil/current-process.cc b/src/libutil/current-process.cc index 8f7150c97de6..d11b371345b2 100644 --- a/src/libutil/current-process.cc +++ b/src/libutil/current-process.cc @@ -12,6 +12,7 @@ #ifdef __APPLE__ # include +# include #endif #ifdef __linux__ @@ -116,6 +117,15 @@ void restoreProcessContext(bool restoreMounts) } } #endif + +#ifdef __APPLE__ + /* Reset the Mach exception ports. Otherwise, if a crashpad_handler is attached to this process, it will be + inherited across execve() and receive spurious crash reports from unrelated programs (e.g. in `nix run`). + FIXME: it would be better to have Sentry tell crashpad_handler to quit, but it doesn't appear to have an API for + that. */ + task_set_exception_ports( + mach_task_self(), EXC_MASK_ALL | EXC_MASK_CRASH, MACH_PORT_NULL, EXCEPTION_DEFAULT, THREAD_STATE_NONE); +#endif } ////////////////////////////////////////////////////////////////////// diff --git a/src/libutil/error.cc b/src/libutil/error.cc index 5b91393716ad..8d6a1d9d9f5b 100644 --- a/src/libutil/error.cc +++ b/src/libutil/error.cc @@ -476,17 +476,25 @@ int handleExceptions(const std::string & programName, fun body) ErrorInfo::programName = baseNameOf(programName); + auto doLog = [&](BaseError & e) { + try { + logError(e.info()); + } catch (...) 
{ + printError(ANSI_RED "error:" ANSI_NORMAL " Exception while printing an exception."); + } + }; + std::string error = ANSI_RED "error:" ANSI_NORMAL " "; try { body(); } catch (Exit & e) { return e.status; } catch (UsageError & e) { - logError(e.info()); - printError("Try '%1% --help' for more information.", programName); + doLog(e); + printError("\nTry '%1% --help' for more information.", programName); return 1; } catch (BaseError & e) { - logError(e.info()); + doLog(e); return e.info().status; } catch (std::bad_alloc & e) { printError(error + "out of memory"); diff --git a/src/libutil/experimental-features.cc b/src/libutil/experimental-features.cc index 5ce11836b0ba..334a410bb863 100644 --- a/src/libutil/experimental-features.cc +++ b/src/libutil/experimental-features.cc @@ -17,7 +17,7 @@ struct ExperimentalFeatureDetails /** * If two different PRs both add an experimental feature, and we just - * used a number for this, we *woudln't* get merge conflict and the + * used a number for this, we *wouldn't* get merge conflict and the * counter will be incremented once instead of twice, causing a build * failure. * @@ -25,7 +25,7 @@ struct ExperimentalFeatureDetails * feature, we either have no issue at all if few features are not added * at the end of the list, or a proper merge conflict if they are. */ -constexpr size_t numXpFeatures = 1 + static_cast(Xp::BLAKE3Hashes); +constexpr size_t numXpFeatures = 1 + static_cast(Xp::Provenance); constexpr std::array xpFeatureDetails = {{ { @@ -71,38 +71,21 @@ constexpr std::array xpFeatureDetails )", .trackingUrl = "https://github.com/NixOS/nix/milestone/42", }, - { - .tag = Xp::Flakes, - .name = "flakes", - .description = R"( - Enable flakes. See the manual entry for [`nix - flake`](@docroot@/command-ref/new-cli/nix3-flake.md) for details. 
- )", - .trackingUrl = "https://github.com/NixOS/nix/milestone/27", - }, { .tag = Xp::FetchTree, .name = "fetch-tree", .description = R"( + *Enabled for Determinate Nix Installer users since 2.24* + Enable the use of the [`fetchTree`](@docroot@/language/builtins.md#builtins-fetchTree) built-in function in the Nix language. `fetchTree` exposes a generic interface for fetching remote file system trees from different types of remote sources. - The [`flakes`](#xp-feature-flakes) feature flag always enables `fetch-tree`. This built-in was previously guarded by the `flakes` experimental feature because of that overlap. Enabling just this feature serves as a "release candidate", allowing users to try it out in isolation. )", .trackingUrl = "https://github.com/NixOS/nix/milestone/31", }, - { - .tag = Xp::NixCommand, - .name = "nix-command", - .description = R"( - Enable the new `nix` subcommands. See the manual on - [`nix`](@docroot@/command-ref/new-cli/nix.md) for details. - )", - .trackingUrl = "https://github.com/NixOS/nix/milestone/28", - }, { .tag = Xp::GitHashing, .name = "git-hashing", @@ -279,6 +262,49 @@ constexpr std::array xpFeatureDetails )", .trackingUrl = "https://github.com/NixOS/nix/milestone/60", }, + { + .tag = Xp::BuildTimeFetchTree, + .name = "build-time-fetch-tree", + .description = R"( + Enable the built-in derivation `builtin:fetch-tree`, as well as the flake input attribute `buildTime`. + )", + .trackingUrl = "", + }, + { + .tag = Xp::ParallelEval, + .name = "parallel-eval", + .description = R"( + Enable built-in functions for parallel evaluation. + )", + .trackingUrl = "", + }, + { + .tag = Xp::WasmBuiltin, + .name = "wasm-builtin", + .description = R"( + Enable the use of the [`builtins.wasm`](@docroot@/language/builtins.md) built-in function in the Nix language. + `builtins.wasm` allows calling WebAssembly functions from Nix expressions. 
+ )", + .trackingUrl = "", + }, + { + .tag = Xp::WasmDerivations, + .name = "wasm-derivations", + .description = R"( + Allow derivations to target the WebAssembly system type (`wasm32-wasip1`). + When enabled, derivations with `system = "wasm32-wasip1"` can be built locally + using a WASI runtime environment. + )", + .trackingUrl = "", + }, + { + .tag = Xp::Provenance, + .name = "provenance", + .description = R"( + Enable keeping track of the provenance of store paths. + )", + .trackingUrl = "", + }, }}; static_assert( @@ -290,6 +316,12 @@ static_assert( }(), "array order does not match enum tag order"); +/** + * A set of previously experimental features that are now considered + * stable. We don't warn if users have these in `experimental-features`. + */ +std::set stabilizedFeatures{"flakes", "nix-command"}; + const std::optional parseExperimentalFeature(const std::string_view & name) { using ReverseXpMap = std::map; diff --git a/src/libutil/include/nix/util/args.hh b/src/libutil/include/nix/util/args.hh index b4337590a39f..fb80416fe38c 100644 --- a/src/libutil/include/nix/util/args.hh +++ b/src/libutil/include/nix/util/args.hh @@ -377,6 +377,7 @@ struct Command : virtual public Args using Category = int; static constexpr Category catDefault = 0; + static constexpr Category catUndocumented = 1; virtual std::optional experimentalFeature(); diff --git a/src/libutil/include/nix/util/configuration.hh b/src/libutil/include/nix/util/configuration.hh index 6fa5e47db5b6..b89e17d652f2 100644 --- a/src/libutil/include/nix/util/configuration.hh +++ b/src/libutil/include/nix/util/configuration.hh @@ -406,6 +406,20 @@ public: } }; +/** + * A setting whose value is represented as JSON. The type `T` must be supported by `nlohmann::json`'s `get()`. 
+ */ +template +class JSONSetting : public Setting +{ +public: + using Setting::Setting; + + T parse(const std::string & str) const override; + + std::string to_string() const override; +}; + /* Delete these overloads to avoid footguns with implicit quoting of Setting in fmt(). */ template @@ -436,7 +450,7 @@ struct ExperimentalFeatureSettings : Config Example: ``` - experimental-features = nix-command flakes + experimental-features = ca-derivations ``` The following experimental features are available: diff --git a/src/libutil/include/nix/util/experimental-features.hh b/src/libutil/include/nix/util/experimental-features.hh index 1a97f269144d..85ca58e23da1 100644 --- a/src/libutil/include/nix/util/experimental-features.hh +++ b/src/libutil/include/nix/util/experimental-features.hh @@ -19,9 +19,7 @@ namespace nix { enum struct ExperimentalFeature { CaDerivations, ImpureDerivations, - Flakes, FetchTree, - NixCommand, GitHashing, RecursiveNix, FetchClosure, @@ -38,8 +36,15 @@ enum struct ExperimentalFeature { PipeOperators, ExternalBuilders, BLAKE3Hashes, + BuildTimeFetchTree, + ParallelEval, + WasmBuiltin, + WasmDerivations, + Provenance, }; +extern std::set stabilizedFeatures; + /** * Just because writing `ExperimentalFeature::CaDerivations` is way too long */ diff --git a/src/libutil/include/nix/util/forwarding-source-accessor.hh b/src/libutil/include/nix/util/forwarding-source-accessor.hh new file mode 100644 index 000000000000..c9693b9e5f92 --- /dev/null +++ b/src/libutil/include/nix/util/forwarding-source-accessor.hh @@ -0,0 +1,52 @@ +#pragma once + +#include "source-accessor.hh" + +namespace nix { + +/** + * A source accessor that just forwards every operation to another + * accessor. This is not useful in itself but can be used as a + * superclass for accessors that do change some operations. 
+ */ +struct ForwardingSourceAccessor : SourceAccessor +{ + ref next; + + ForwardingSourceAccessor(ref next) + : next(next) + { + } + + void readFile(const CanonPath & path, Sink & sink, fun sizeCallback) override + { + next->readFile(path, sink, sizeCallback); + } + + std::optional maybeLstat(const CanonPath & path) override + { + return next->maybeLstat(path); + } + + DirEntries readDirectory(const CanonPath & path) override + { + return next->readDirectory(path); + } + + std::string readLink(const CanonPath & path) override + { + return next->readLink(path); + } + + std::string showPath(const CanonPath & path) override + { + return next->showPath(path); + } + + std::optional getPhysicalPath(const CanonPath & path) override + { + return next->getPhysicalPath(path); + } +}; + +} // namespace nix diff --git a/src/libutil/include/nix/util/logging.hh b/src/libutil/include/nix/util/logging.hh index 6f559b796c8e..7793426da89d 100644 --- a/src/libutil/include/nix/util/logging.hh +++ b/src/libutil/include/nix/util/logging.hh @@ -40,6 +40,8 @@ typedef enum { resSetExpected = 106, resPostBuildLogLine = 107, resFetchStatus = 108, + resHashMismatch = 109, + resBuildResult = 110, } ResultType; typedef uint64_t ActivityId; @@ -60,7 +62,7 @@ struct LoggerSettings : Config {}, "json-log-path", R"( - A file or unix socket to which JSON records of Nix's log output are + A file or Unix domain socket to which JSON records of Nix's log output are written, in the same format as `--log-format internal-json` (without the `@nix ` prefixes on each line). 
Concurrent writes to the same file by multiple Nix processes are not supported and @@ -159,6 +161,8 @@ public: virtual void result(ActivityId act, ResultType type, const Fields & fields) {}; + virtual void result(ActivityId act, ResultType type, const nlohmann::json & json) {}; + virtual void writeToStdout(std::string_view s); template @@ -223,6 +227,11 @@ struct Activity result(resSetExpected, type2, expected); } + void result(ResultType type, const nlohmann::json & json) const + { + logger.result(id, type, json); + } + template void result(ResultType type, const Args &... args) const { diff --git a/src/libutil/include/nix/util/meson.build b/src/libutil/include/nix/util/meson.build index 59dce90e1aa3..8682f9c4dc16 100644 --- a/src/libutil/include/nix/util/meson.build +++ b/src/libutil/include/nix/util/meson.build @@ -47,6 +47,7 @@ headers = [ config_pub_h ] + files( 'file-system.hh', 'finally.hh', 'fmt.hh', + 'forwarding-source-accessor.hh', 'fs-sink.hh', 'fun.hh', 'git.hh', @@ -64,12 +65,14 @@ headers = [ config_pub_h ] + files( 'nar-cache.hh', 'nar-listing.hh', 'os-string.hh', + 'override-provenance-source-accessor.hh', 'pool.hh', 'pos-idx.hh', 'pos-table.hh', 'position.hh', 'posix-source-accessor.hh', 'processes.hh', + 'provenance.hh', 'ref.hh', 'regex-combinators.hh', 'repair-flag.hh', diff --git a/src/libutil/include/nix/util/override-provenance-source-accessor.hh b/src/libutil/include/nix/util/override-provenance-source-accessor.hh new file mode 100644 index 000000000000..5ed937db02ad --- /dev/null +++ b/src/libutil/include/nix/util/override-provenance-source-accessor.hh @@ -0,0 +1,21 @@ +#pragma once + +#include "nix/util/forwarding-source-accessor.hh" + +namespace nix { + +struct OverrideProvenanceSourceAccessor : ForwardingSourceAccessor +{ + OverrideProvenanceSourceAccessor(ref next, std::shared_ptr provenance) + : ForwardingSourceAccessor(std::move(next)) + { + this->provenance = std::move(provenance); + } + + std::shared_ptr getProvenance(const 
CanonPath & path) override + { + return provenance; + } +}; + +} // namespace nix diff --git a/src/libutil/include/nix/util/pos-idx.hh b/src/libutil/include/nix/util/pos-idx.hh index 8e668176c619..7b7d16ca3a4d 100644 --- a/src/libutil/include/nix/util/pos-idx.hh +++ b/src/libutil/include/nix/util/pos-idx.hh @@ -15,12 +15,12 @@ class PosIdx private: uint32_t id; +public: explicit PosIdx(uint32_t id) : id(id) { } -public: PosIdx() : id(0) { @@ -45,6 +45,11 @@ public: { return std::hash{}(id); } + + uint32_t get() const + { + return id; + } }; inline PosIdx noPos = {}; diff --git a/src/libutil/include/nix/util/pos-table.hh b/src/libutil/include/nix/util/pos-table.hh index c5f93a3d5979..954138afbc8e 100644 --- a/src/libutil/include/nix/util/pos-table.hh +++ b/src/libutil/include/nix/util/pos-table.hh @@ -49,20 +49,29 @@ private: */ using LinesCache = LRUCache; - std::map origins; - mutable Sync linesCache; + // FIXME: this could be made lock-free (at least for access) if we + // have a data structure where pointers to existing positions are + // never invalidated. + struct State + { + std::map origins; + }; + + SharedSync state_; + const Origin * resolve(PosIdx p) const { if (p.id == 0) return nullptr; + auto state(state_.readLock()); const auto idx = p.id - 1; - /* we want the last key <= idx, so we'll take prev(first key > idx). - this is guaranteed to never rewind origin.begin because the first - key is always 0. */ - const auto pastOrigin = origins.upper_bound(idx); + /* We want the last key <= idx, so we'll take prev(first key > + idx). This is guaranteed to never rewind origin.begin + because the first key is always 0. 
*/ + const auto pastOrigin = state->origins.upper_bound(idx); return &std::prev(pastOrigin)->second; } @@ -74,15 +83,16 @@ public: Origin addOrigin(Pos::Origin origin, size_t size) { + auto state(state_.lock()); uint32_t offset = 0; - if (auto it = origins.rbegin(); it != origins.rend()) + if (auto it = state->origins.rbegin(); it != state->origins.rend()) offset = it->first + it->second.size; // +1 because all PosIdx are offset by 1 to begin with, and // another +1 to ensure that all origins can point to EOF, eg // on (invalid) empty inputs. if (2 + offset + size < offset) return Origin{origin, offset, 0}; - return origins.emplace(offset, Origin{origin, offset, size}).first->second; + return state->origins.emplace(offset, Origin{origin, offset, size}).first->second; } PosIdx add(const Origin & origin, size_t offset) @@ -119,7 +129,7 @@ public: { auto lines = linesCache.lock(); lines->clear(); - origins.clear(); + state_.lock()->origins.clear(); } }; diff --git a/src/libutil/include/nix/util/provenance.hh b/src/libutil/include/nix/util/provenance.hh new file mode 100644 index 000000000000..da8005b31817 --- /dev/null +++ b/src/libutil/include/nix/util/provenance.hh @@ -0,0 +1,59 @@ +#pragma once + +#include "nix/util/ref.hh" +#include "nix/util/canon-path.hh" + +#include + +#include + +namespace nix { + +struct Provenance +{ + virtual ~Provenance() = default; + + static ref from_json_str(std::string_view); + + static std::shared_ptr from_json_str_optional(std::string_view); + + static ref from_json(const nlohmann::json & json); + + std::string to_json_str() const; + + virtual nlohmann::json to_json() const = 0; + +protected: + + using ProvenanceFactory = std::function(nlohmann::json)>; + + using RegisteredTypes = std::map; + + static RegisteredTypes & registeredTypes(); + +public: + + struct Register + { + Register(const std::string & type, ProvenanceFactory && factory) + { + registeredTypes().insert_or_assign(type, std::move(factory)); + } + }; +}; + +struct 
SubpathProvenance : public Provenance +{ + std::shared_ptr next; + CanonPath subpath; + + SubpathProvenance(std::shared_ptr next, const CanonPath & subpath) + : next(std::move(next)) + , subpath(subpath) + { + } + + nlohmann::json to_json() const override; +}; + +} // namespace nix diff --git a/src/libutil/include/nix/util/serialise.hh b/src/libutil/include/nix/util/serialise.hh index f8b545f49b9a..0ba4a427262b 100644 --- a/src/libutil/include/nix/util/serialise.hh +++ b/src/libutil/include/nix/util/serialise.hh @@ -744,4 +744,52 @@ struct FramedSink : nix::BufferedSink }; }; +/** + * A wrapper source that ensures that at least a specified number of bytes are read from the underlying source. + */ +struct EnsureRead : Source +{ + Source & source; + uint64_t bytesRead = 0, bytesExpected; + + EnsureRead(Source & source, uint64_t bytesExpected) + : source(source) + , bytesExpected(bytesExpected) + { + } + + ~EnsureRead() + { + try { + finish(); + } catch (...) { + ignoreExceptionInDestructor(); + } + } + + void finish() + { + if (bytesRead < bytesExpected) + skip(bytesExpected - bytesRead); + } + + size_t read(char * data, size_t len) override + { + auto n = source.read(data, len); + bytesRead += n; + return n; + } + + bool good() override + { + return source.good(); + } + + void skip(size_t len) override + { + source.skip(len); + bytesRead += len; + } +}; + } // namespace nix diff --git a/src/libutil/include/nix/util/source-accessor.hh b/src/libutil/include/nix/util/source-accessor.hh index bcc25b99a5aa..cf0a872d7f5f 100644 --- a/src/libutil/include/nix/util/source-accessor.hh +++ b/src/libutil/include/nix/util/source-accessor.hh @@ -10,6 +10,7 @@ namespace nix { struct Sink; +struct Provenance; /** * Note there is a decent chance this type soon goes away because the problem is solved another way. 
@@ -214,6 +215,13 @@ struct SourceAccessor : std::enable_shared_from_this return std::nullopt; } + std::shared_ptr provenance; + + /** + * Return the provenance of the specified path, or `nullptr` if not available. + */ + virtual std::shared_ptr getProvenance(const CanonPath & path); + /** * Invalidate any cached value the accessor may have for the specified path. */ diff --git a/src/libutil/include/nix/util/source-path.hh b/src/libutil/include/nix/util/source-path.hh index 24932e4cddbe..677457f8ed48 100644 --- a/src/libutil/include/nix/util/source-path.hh +++ b/src/libutil/include/nix/util/source-path.hh @@ -114,6 +114,11 @@ struct SourcePath return {accessor, accessor->resolveSymlinks(path, mode)}; } + std::shared_ptr getProvenance() const + { + return accessor->getProvenance(path); + } + void invalidateCache() const { accessor->invalidateCache(path); diff --git a/src/libutil/include/nix/util/table.hh b/src/libutil/include/nix/util/table.hh index 13e4506d5a30..0af33b66cc3b 100644 --- a/src/libutil/include/nix/util/table.hh +++ b/src/libutil/include/nix/util/table.hh @@ -2,10 +2,26 @@ #include "nix/util/types.hh" +#include + namespace nix { -typedef std::vector> Table; +struct TableCell +{ + std::string content; + + enum Alignment { Left, Right } alignment = Left; + + TableCell(std::string content, Alignment alignment = Left) + : content(std::move(content)) + , alignment(alignment) + { + } +}; + +using TableRow = std::vector; +using Table = std::vector; -void printTable(std::ostream & out, Table & table); +void printTable(std::ostream & out, Table & table, unsigned int width = std::numeric_limits::max()); } // namespace nix diff --git a/src/libutil/include/nix/util/tarfile.hh b/src/libutil/include/nix/util/tarfile.hh index 324c7c8a8a99..c66e05ef6709 100644 --- a/src/libutil/include/nix/util/tarfile.hh +++ b/src/libutil/include/nix/util/tarfile.hh @@ -37,6 +37,8 @@ struct TarArchive int getArchiveFilterCodeByName(const std::string & method); +void 
unpackTarfile(Source & source, const std::filesystem::path & destDir); + void unpackTarfile(const std::filesystem::path & tarFile, const std::filesystem::path & destDir); time_t unpackTarfileToSink(TarArchive & archive, ExtendedFileSystemObjectSink & parseSink); diff --git a/src/libutil/include/nix/util/terminal.hh b/src/libutil/include/nix/util/terminal.hh index 5e35cbb95408..c70006bc51e3 100644 --- a/src/libutil/include/nix/util/terminal.hh +++ b/src/libutil/include/nix/util/terminal.hh @@ -44,6 +44,11 @@ void updateWindowSize(); */ std::pair getWindowSize(); +/** + * @return The number of columns of the terminal, or std::numeric_limits::max() if unknown. + */ +unsigned int getWindowWidth(); + /** * Get the slave name of a pseudoterminal in a thread-safe manner. * diff --git a/src/libutil/include/nix/util/thread-pool.hh b/src/libutil/include/nix/util/thread-pool.hh index d41e7a6f76e2..a6bb7cf02d1c 100644 --- a/src/libutil/include/nix/util/thread-pool.hh +++ b/src/libutil/include/nix/util/thread-pool.hh @@ -87,21 +87,27 @@ private: * its dependencies have been processed. */ template -void processGraph(const std::set & nodes, fun(const T &)> getEdges, fun processNode) +void processGraph( + const std::set & nodes, + fun(const T &)> getEdges, + fun processNode, + bool discoverNodes = false, + size_t maxThreads = 0) { struct Graph { + std::set known; std::set left; std::map> refs, rrefs; }; - Sync graph_(Graph{nodes, {}, {}}); + Sync graph_(Graph{nodes, nodes, {}, {}}); std::function worker; - /* Create pool last to ensure threads are stopped before other destructors - * run */ - ThreadPool pool; + /* Create pool last to ensure threads are stopped before other + destructors run. 
*/ + ThreadPool pool(maxThreads); worker = [&](const T & node) { { @@ -118,11 +124,19 @@ void processGraph(const std::set & nodes, fun(const T &)> getEdge { auto graph(graph_.lock()); - for (auto & ref : refs) + for (auto & ref : refs) { + if (discoverNodes) { + auto [i, inserted] = graph->known.insert(ref); + if (inserted) { + pool.enqueue(std::bind(worker, std::ref(*i))); + graph->left.insert(ref); + } + } if (graph->left.count(ref)) { graph->refs[node].insert(ref); graph->rrefs[ref].insert(node); } + } if (graph->refs[node].empty()) goto doWork; } diff --git a/src/libutil/include/nix/util/url.hh b/src/libutil/include/nix/util/url.hh index b1cc1155a66a..5099b8bdb240 100644 --- a/src/libutil/include/nix/util/url.hh +++ b/src/libutil/include/nix/util/url.hh @@ -229,6 +229,11 @@ struct ParsedURL */ std::string renderPath(bool encode = false) const; + /** + * Like to_string(), but removes query strings and passwords. + */ + std::string renderSanitized() const; + auto operator<=>(const ParsedURL & other) const noexcept = default; /** diff --git a/src/libutil/include/nix/util/util.hh b/src/libutil/include/nix/util/util.hh index 144c83cc0c9f..5f147716c0ad 100644 --- a/src/libutil/include/nix/util/util.hh +++ b/src/libutil/include/nix/util/util.hh @@ -245,6 +245,11 @@ void ignoreExceptionInDestructor(Verbosity lvl = lvlError); */ void ignoreExceptionExceptInterrupt(Verbosity lvl = lvlError); +/** + * Like ignoreExceptionExceptInterrupt(), but specifies the error prefix. + */ +void logExceptionExceptInterrupt(std::string_view prefix = "error: ", Verbosity lvl = lvlError); + /** * Tree formatting. */ @@ -316,9 +321,15 @@ typename T::mapped_type * get(T & map, const K & key) template typename T::mapped_type * get(T && map, const K & key) = delete; -/** - * Look up a value in a `boost::concurrent_flat_map`. 
- */ +template +std::optional getOptional(const T & map, const typename T::key_type & key) +{ + auto i = map.find(key); + if (i == map.end()) + return std::nullopt; + return {i->second}; +} + template std::optional getConcurrent(const T & map, const typename T::key_type & key) { diff --git a/src/libutil/linux/cgroup.cc b/src/libutil/linux/cgroup.cc index 3253d286194f..c79a6dd06c73 100644 --- a/src/libutil/linux/cgroup.cc +++ b/src/libutil/linux/cgroup.cc @@ -174,4 +174,23 @@ CanonPath getRootCgroup() return rootCgroup; } +std::set getPidsInCgroup(const std::filesystem::path & cgroup) +{ + if (!pathExists(cgroup)) + return {}; + + auto procsFile = cgroup / "cgroup.procs"; + + std::set result; + + for (auto & pidStr : tokenizeString>(readFile(procsFile))) { + if (auto o = string2Int(pidStr)) + result.insert(*o); + else + throw Error("invalid PID '%s'", pidStr); + } + + return result; +} + } // namespace nix diff --git a/src/libutil/linux/include/nix/util/cgroup.hh b/src/libutil/linux/include/nix/util/cgroup.hh index 6eab98ae7d81..fb437215d92b 100644 --- a/src/libutil/linux/include/nix/util/cgroup.hh +++ b/src/libutil/linux/include/nix/util/cgroup.hh @@ -41,4 +41,9 @@ CanonPath getCurrentCgroup(); */ CanonPath getRootCgroup(); +/** + * Get the PIDs of all processes in the given cgroup. + */ +std::set getPidsInCgroup(const std::filesystem::path & cgroup); + } // namespace nix diff --git a/src/libutil/linux/linux-namespaces.cc b/src/libutil/linux/linux-namespaces.cc index 83a800e4729e..39dad9268bcb 100644 --- a/src/libutil/linux/linux-namespaces.cc +++ b/src/libutil/linux/linux-namespaces.cc @@ -39,7 +39,10 @@ bool userNamespacesSupported() Pid pid = startProcess([&]() { _exit(0); }, {.cloneFlags = CLONE_NEWUSER}); auto r = pid.wait(); - assert(!r); + /* The assert is OK because if we cannot do CLONE_NEWUSER we will + throw above, and if the process does run, it must exit this way + (or something else is really wrong). 
*/ + assert(statusOk(r)); } catch (SysError & e) { debug("user namespaces do not work on this system: %s", e.msg()); return false; @@ -72,8 +75,8 @@ bool mountAndPidNamespacesSupported() }, {.cloneFlags = CLONE_NEWNS | CLONE_NEWPID | (userNamespacesSupported() ? CLONE_NEWUSER : 0)}); - if (pid.wait()) { - debug("PID namespaces do not work on this system: cannot remount /proc"); + if (auto status = pid.wait(); !statusOk(status)) { + debug("PID namespaces do not work on this system: cannot remount /proc: %s", statusToString(status)); return false; } diff --git a/src/libutil/logging.cc b/src/libutil/logging.cc index f1c8190f7db3..b7b43e5ff60f 100644 --- a/src/libutil/logging.cc +++ b/src/libutil/logging.cc @@ -341,6 +341,16 @@ struct JSONLogger : Logger addFields(json, fields); write(json); } + + void result(ActivityId act, ResultType type, const nlohmann::json & j) override + { + nlohmann::json json; + json["action"] = "result"; + json["id"] = act; + json["type"] = type; + json["payload"] = j; + write(json); + } }; std::unique_ptr makeJSONLogger(Descriptor fd, bool includeNixPrefix) diff --git a/src/libutil/meson.build b/src/libutil/meson.build index 71bea971dfe5..3ce3dbad2b22 100644 --- a/src/libutil/meson.build +++ b/src/libutil/meson.build @@ -126,6 +126,10 @@ config_priv_h = configure_file( subdir('nix-meson-build-support/common') +if have_cxa_throw + deps_other += cxa_throw_dep +endif + sources = [ config_priv_h ] + files( 'archive.cc', 'args.cc', @@ -164,6 +168,7 @@ sources = [ config_priv_h ] + files( 'position.cc', 'posix-source-accessor.cc', 'processes.cc', + 'provenance.cc', 'serialise.cc', 'signature/local-keys.cc', 'signature/signer.cc', @@ -215,7 +220,7 @@ this_library = library( dependencies : deps_public + deps_private + deps_other, include_directories : include_dirs, link_args : linker_export_flags, - prelink : true, # For C++ static initializers + prelink : prelink, # For C++ static initializers install : true, cpp_pch : do_pch ? 
[ 'pch/precompiled-headers.hh' ] : [], ) diff --git a/src/libutil/mounted-source-accessor.cc b/src/libutil/mounted-source-accessor.cc index aab95f775b77..ca6d49275b3e 100644 --- a/src/libutil/mounted-source-accessor.cc +++ b/src/libutil/mounted-source-accessor.cc @@ -100,6 +100,12 @@ struct MountedSourceAccessorImpl : MountedSourceAccessor return accessor->getFingerprint(subpath); } + std::shared_ptr getProvenance(const CanonPath & path) override + { + auto [accessor, subpath] = resolve(path); + return accessor->getProvenance(subpath); + } + void invalidateCache(const CanonPath & path) override { auto [accessor, subpath] = resolve(path); diff --git a/src/libutil/package.nix b/src/libutil/package.nix index 3deb7ba3ae3c..287e6c6a1139 100644 --- a/src/libutil/package.nix +++ b/src/libutil/package.nix @@ -22,7 +22,7 @@ let in mkMesonLibrary (finalAttrs: { - pname = "nix-util"; + pname = "determinate-nix-util"; inherit version; workDir = ./.; diff --git a/src/libutil/position.cc b/src/libutil/position.cc index 049c95474afe..63051a9656ec 100644 --- a/src/libutil/position.cc +++ b/src/libutil/position.cc @@ -47,7 +47,7 @@ std::optional Pos::getSource() const [](const SourcePath & path) -> std::optional { try { return path.readFile(); - } catch (Error &) { + } catch (...) { // FIXME: make readFile() not throw? 
return std::nullopt; } }}, diff --git a/src/libutil/provenance.cc b/src/libutil/provenance.cc new file mode 100644 index 000000000000..3130e148f540 --- /dev/null +++ b/src/libutil/provenance.cc @@ -0,0 +1,74 @@ +#include "nix/util/provenance.hh" +#include "nix/util/json-utils.hh" + +namespace nix { + +struct UnknownProvenance : Provenance +{ + nlohmann::json payload; + + UnknownProvenance(nlohmann::json payload) + : payload(std::move(payload)) + { + } + + nlohmann::json to_json() const override + { + return payload; + } +}; + +Provenance::RegisteredTypes & Provenance::registeredTypes() +{ + static Provenance::RegisteredTypes types; + return types; +} + +ref Provenance::from_json_str(std::string_view s) +{ + return from_json(nlohmann::json::parse(s)); +} + +std::shared_ptr Provenance::from_json_str_optional(std::string_view s) +{ + if (s.empty()) + return nullptr; + return Provenance::from_json_str(s); +} + +ref Provenance::from_json(const nlohmann::json & json) +{ + auto & obj = getObject(json); + + auto type = getString(valueAt(obj, "type")); + + auto it = registeredTypes().find(type); + if (it == registeredTypes().end()) + return make_ref(obj); + + return it->second(obj); +} + +std::string Provenance::to_json_str() const +{ + return to_json().dump(); +} + +nlohmann::json SubpathProvenance::to_json() const +{ + return { + {"type", "subpath"}, + {"subpath", subpath.abs()}, + {"next", next ? 
next->to_json() : nlohmann::json(nullptr)}, + }; +} + +Provenance::Register registerSubpathProvenance("subpath", [](nlohmann::json json) { + auto & obj = getObject(json); + std::shared_ptr next; + if (auto p = optionalValueAt(obj, "next"); p && !p->is_null()) + next = Provenance::from_json(*p); + return make_ref(next, CanonPath(getString(valueAt(obj, "subpath")))); +}); + +} // namespace nix diff --git a/src/libutil/source-accessor.cc b/src/libutil/source-accessor.cc index 20e8eb367c9f..49c9b587e91b 100644 --- a/src/libutil/source-accessor.cc +++ b/src/libutil/source-accessor.cc @@ -1,5 +1,7 @@ #include + #include "nix/util/source-accessor.hh" +#include "nix/util/provenance.hh" namespace nix { @@ -126,4 +128,9 @@ CanonPath SourceAccessor::resolveSymlinks(const CanonPath & path, SymlinkResolut return res; } +std::shared_ptr SourceAccessor::getProvenance(const CanonPath & path) +{ + return provenance && !path.isRoot() ? std::make_shared(provenance, path) : provenance; +} + } // namespace nix diff --git a/src/libutil/table.cc b/src/libutil/table.cc index fa1bf110d93f..215171dc02fc 100644 --- a/src/libutil/table.cc +++ b/src/libutil/table.cc @@ -1,4 +1,5 @@ #include "nix/util/table.hh" +#include "nix/util/terminal.hh" #include #include @@ -7,7 +8,7 @@ namespace nix { -void printTable(std::ostream & out, Table & table) +void printTable(std::ostream & out, Table & table, unsigned int width) { auto nrColumns = table.size() > 0 ? table.front().size() : 0; @@ -18,19 +19,31 @@ void printTable(std::ostream & out, Table & table) assert(i.size() == nrColumns); size_t column = 0; for (auto j = i.begin(); j != i.end(); ++j, ++column) - if (j->size() > widths[column]) - widths[column] = j->size(); + // TODO: take ANSI escapes into account when calculating width. 
+ widths[column] = std::max(widths[column], j->content.size()); } for (auto & i : table) { size_t column = 0; + std::string line; for (auto j = i.begin(); j != i.end(); ++j, ++column) { - std::string s = *j; + std::string s = j->content; replace(s.begin(), s.end(), '\n', ' '); - out << s; - if (column < nrColumns - 1) - out << std::string(widths[column] - s.size() + 2, ' '); + + auto padding = std::string(widths[column] - s.size(), ' '); + if (j->alignment == TableCell::Right) { + line += padding; + line += s; + } else { + line += s; + if (column + 1 < nrColumns) + line += padding; + } + + if (column + 1 < nrColumns) + line += " "; } + out << filterANSIEscapes(line, false, width); out << std::endl; } } diff --git a/src/libutil/tarfile.cc b/src/libutil/tarfile.cc index eea03766375c..e71c1103735b 100644 --- a/src/libutil/tarfile.cc +++ b/src/libutil/tarfile.cc @@ -158,6 +158,14 @@ static void extract_archive(TarArchive & archive, const std::filesystem::path & archive.close(); } +void unpackTarfile(Source & source, const std::filesystem::path & destDir) +{ + auto archive = TarArchive(source); + + createDirs(destDir); + extract_archive(archive, destDir); +} + void unpackTarfile(const std::filesystem::path & tarFile, const std::filesystem::path & destDir) { auto archive = TarArchive(tarFile); diff --git a/src/libutil/tee-logger.cc b/src/libutil/tee-logger.cc index 8433168a5a82..889b82ca02b8 100644 --- a/src/libutil/tee-logger.cc +++ b/src/libutil/tee-logger.cc @@ -65,6 +65,12 @@ struct TeeLogger : Logger logger->result(act, type, fields); } + void result(ActivityId act, ResultType type, const nlohmann::json & json) override + { + for (auto & logger : loggers) + logger->result(act, type, json); + } + void writeToStdout(std::string_view s) override { for (auto & logger : loggers) { diff --git a/src/libutil/terminal.cc b/src/libutil/terminal.cc index 48d692872712..52b9e51a221b 100644 --- a/src/libutil/terminal.cc +++ b/src/libutil/terminal.cc @@ -191,6 +191,14 @@ std::pair 
getWindowSize() return *windowSize->lock(); } +unsigned int getWindowWidth() +{ + unsigned int width = getWindowSize().second; + if (width <= 0) + width = std::numeric_limits::max(); + return width; +} + #ifndef _WIN32 std::string getPtsName(int fd) { diff --git a/src/libutil/union-source-accessor.cc b/src/libutil/union-source-accessor.cc index de50c75e3733..dab1de37e8ee 100644 --- a/src/libutil/union-source-accessor.cc +++ b/src/libutil/union-source-accessor.cc @@ -91,6 +91,16 @@ struct UnionSourceAccessor : SourceAccessor return {path, std::nullopt}; } + std::shared_ptr getProvenance(const CanonPath & path) override + { + for (auto & accessor : accessors) { + auto prov = accessor->getProvenance(path); + if (prov) + return prov; + } + return nullptr; + } + void invalidateCache(const CanonPath & path) override { for (auto & accessor : accessors) diff --git a/src/libutil/unix/signals.cc b/src/libutil/unix/signals.cc index 6f11010587a1..07018ef25643 100644 --- a/src/libutil/unix/signals.cc +++ b/src/libutil/unix/signals.cc @@ -43,7 +43,9 @@ struct InterruptCallbacks std::map> callbacks; }; -static Sync _interruptCallbacks; +// Note: this object intentionally leaks to ensure that it's not deleted while the detached signal handler thread is +// running. 
+static auto _interruptCallbacks = new Sync; static void signalHandlerThread(sigset_t set) { @@ -69,7 +71,7 @@ void unix::triggerInterrupt() while (true) { std::function callback; { - auto interruptCallbacks(_interruptCallbacks.lock()); + auto interruptCallbacks(_interruptCallbacks->lock()); auto lb = interruptCallbacks->callbacks.lower_bound(i); if (lb == interruptCallbacks->callbacks.end()) break; @@ -154,14 +156,14 @@ struct InterruptCallbackImpl : InterruptCallback ~InterruptCallbackImpl() override { - auto interruptCallbacks(_interruptCallbacks.lock()); + auto interruptCallbacks(_interruptCallbacks->lock()); interruptCallbacks->callbacks.erase(token); } }; std::unique_ptr createInterruptCallback(fun callback) { - auto interruptCallbacks(_interruptCallbacks.lock()); + auto interruptCallbacks(_interruptCallbacks->lock()); auto token = interruptCallbacks->nextToken++; interruptCallbacks->callbacks.emplace(token, callback); return std::make_unique(token); diff --git a/src/libutil/url.cc b/src/libutil/url.cc index 184eba263f08..f518af6897bd 100644 --- a/src/libutil/url.cc +++ b/src/libutil/url.cc @@ -333,6 +333,15 @@ std::string ParsedURL::renderPath(bool encode) const return renderUrlPathNoPctEncoding(path); } +std::string ParsedURL::renderSanitized() const +{ + auto url = *this; + if (url.authority) + url.authority->password.reset(); + url.query.clear(); + return url.to_string(); +} + std::string ParsedURL::renderAuthorityAndPath() const { std::string res; diff --git a/src/libutil/util.cc b/src/libutil/util.cc index d75aa4d67d94..6e84f448edfd 100644 --- a/src/libutil/util.cc +++ b/src/libutil/util.cc @@ -238,15 +238,20 @@ void ignoreExceptionInDestructor(Verbosity lvl) } void ignoreExceptionExceptInterrupt(Verbosity lvl) +{ + logExceptionExceptInterrupt("error (ignored): ", lvl); +} + +void logExceptionExceptInterrupt(std::string_view prefix, Verbosity lvl) { try { throw; } catch (const Interrupted & e) { throw; } catch (Error & e) { - printMsg(lvl, ANSI_RED 
"error (ignored):" ANSI_NORMAL " %s", e.info().msg); + printMsg(lvl, ANSI_RED "%s" ANSI_NORMAL "%s", prefix, e.info().msg); } catch (std::exception & e) { - printMsg(lvl, ANSI_RED "error (ignored):" ANSI_NORMAL " %s", e.what()); + printMsg(lvl, ANSI_RED "%s" ANSI_NORMAL "%s", prefix, e.what()); } } diff --git a/src/nix/app.cc b/src/nix/app.cc index 634db04f3fe1..4a8ca815c0fa 100644 --- a/src/nix/app.cc +++ b/src/nix/app.cc @@ -74,6 +74,7 @@ UnresolvedApp InstallableValue::toApp(EvalState & state) std::visit( overloaded{ [&](const NixStringContextElem::DrvDeep & d) -> DerivedPath { + state.waitForPath(d.drvPath); /* We want all outputs of the drv */ return DerivedPath::Built{ .drvPath = makeConstantStorePathRef(d.drvPath), @@ -81,6 +82,7 @@ UnresolvedApp InstallableValue::toApp(EvalState & state) }; }, [&](const NixStringContextElem::Built & b) -> DerivedPath { + state.waitForPath(*b.drvPath); return DerivedPath::Built{ .drvPath = b.drvPath, .outputs = OutputsSpec::Names{b.output}, @@ -88,16 +90,19 @@ UnresolvedApp InstallableValue::toApp(EvalState & state) }, [&](const NixStringContextElem::Opaque & o) -> DerivedPath { return DerivedPath::Opaque{ - .path = o.path, + .path = state.devirtualize(o.path), }; }, + [&](const NixStringContextElem::Path & p) -> DerivedPath { + throw Error("'program' attribute of an 'app' output cannot have no context"); + }, }, c.raw)); } return UnresolvedApp{App{ .context = std::move(context2), - .program = program, + .program = state.devirtualize(program, context), }}; } diff --git a/src/nix/bundle.cc b/src/nix/bundle.cc index 5bca8e0cb31f..0e5d18035719 100644 --- a/src/nix/bundle.cc +++ b/src/nix/bundle.cc @@ -54,21 +54,9 @@ struct CmdBundle : InstallableValueCommand return catSecondary; } - // FIXME: cut&paste from CmdRun. - Strings getDefaultFlakeAttrPaths() override + StringSet getRoles() override { - Strings res{"apps." + settings.thisSystem.get() + ".default", "defaultApp." 
+ settings.thisSystem.get()}; - for (auto & s : SourceExprCommand::getDefaultFlakeAttrPaths()) - res.push_back(s); - return res; - } - - Strings getDefaultFlakeAttrPathPrefixes() override - { - Strings res{"apps." + settings.thisSystem.get() + "."}; - for (auto & s : SourceExprCommand::getDefaultFlakeAttrPathPrefixes()) - res.push_back(s); - return res; + return {"nix-run"}; } void run(ref store, ref installable) override @@ -86,9 +74,9 @@ struct CmdBundle : InstallableValueCommand std::move(bundlerFlakeRef), bundlerName, std::move(extendedOutputsSpec), - {"bundlers." + settings.thisSystem.get() + ".default", "defaultBundler." + settings.thisSystem.get()}, - {"bundlers." + settings.thisSystem.get() + "."}, - lockFlags}; + {"nix-bundler"}, + lockFlags, + getDefaultFlakeSchemas()}; auto vRes = evalState->allocValue(); evalState->callFunction(*bundler.toValue(*evalState).first, *val, *vRes, noPos); @@ -103,6 +91,8 @@ struct CmdBundle : InstallableValueCommand NixStringContext context2; auto drvPath = evalState->coerceToStorePath(attr1->pos, *attr1->value, context2, ""); + evalState->waitForAllPaths(); + drvPath.requireDerivation(); auto attr2 = vRes->attrs()->get(evalState->s.outPath); @@ -111,6 +101,8 @@ struct CmdBundle : InstallableValueCommand auto outPath = evalState->coerceToStorePath(attr2->pos, *attr2->value, context2, ""); + evalState->waitForAllPaths(); + store->buildPaths({ DerivedPath::Built{ .drvPath = makeConstantStorePathRef(drvPath), diff --git a/src/nix/crash-handler.cc b/src/nix/crash-handler.cc index 17c948dab143..29c4f2027ca4 100644 --- a/src/nix/crash-handler.cc +++ b/src/nix/crash-handler.cc @@ -34,7 +34,7 @@ void logFatal(std::string const & s) void onTerminate() { logFatal( - "Nix crashed. This is a bug. Please report this at https://github.com/NixOS/nix/issues with the following information included:\n"); + "Determinate Nix crashed. This is a bug. 
Please report this at https://github.com/DeterminateSystems/nix-src/issues with the following information included:\n"); try { std::exception_ptr eptr = std::current_exception(); if (eptr) { diff --git a/src/nix/crash.cc b/src/nix/crash.cc new file mode 100644 index 000000000000..3ed491ce75e4 --- /dev/null +++ b/src/nix/crash.cc @@ -0,0 +1,50 @@ +#include "nix/cmd/command.hh" + +#include + +using namespace nix; + +struct CmdCrash : Command +{ + std::string type; + + CmdCrash() + { + expectArg("type", &type); + } + + Category category() override + { + return catUndocumented; + } + + std::string description() override + { + return "crash the program to test crash reporting"; + } + + void run() override + { + if (type == "segfault") { + printError("Triggering a segfault..."); + volatile int * p = nullptr; + *p = 123; + } + + else if (type == "assert") { + printError("Triggering an assertion failure..."); + assert(false && "This is an assertion failure"); + } + + else if (type == "logic-error") { + printError("Triggering a C++ logic error..."); + std::bitset<4>{"012"}; + } + + else { + throw Error("unknown crash type '%s'", type); + } + } +}; + +static auto rCrash = registerCommand("__crash"); diff --git a/src/nix/develop.cc b/src/nix/develop.cc index b898e99a4fe3..19cb0151594d 100644 --- a/src/nix/develop.cc +++ b/src/nix/develop.cc @@ -1,5 +1,6 @@ #include "nix/util/config-global.hh" #include "nix/expr/eval.hh" +#include "nix/fetchers/fetch-settings.hh" #include "nix/cmd/installable-flake.hh" #include "nix/cmd/command-installable-value.hh" #include "nix/main/common-args.hh" @@ -383,7 +384,8 @@ struct Common : InstallableCommand, MixProfile /* Substitute occurrences of output paths. 
*/ auto outputs = buildEnvironment.vars.find("outputs"); - assert(outputs != buildEnvironment.vars.end()); + if (outputs == buildEnvironment.vars.end()) + throw Error("derivation does not have an 'outputs' attribute"); StringMap rewrites; if (buildEnvironment.providesStructuredAttrs()) { @@ -455,22 +457,9 @@ struct Common : InstallableCommand, MixProfile rewrites.insert({BuildEnvironment::getString(fileInBuilderEnv->second), targetFilePath.string()}); } - Strings getDefaultFlakeAttrPaths() override + StringSet getRoles() override { - Strings paths{ - "devShells." + settings.thisSystem.get() + ".default", - "devShell." + settings.thisSystem.get(), - }; - for (auto & p : SourceExprCommand::getDefaultFlakeAttrPaths()) - paths.push_back(p); - return paths; - } - - Strings getDefaultFlakeAttrPathPrefixes() override - { - auto res = SourceExprCommand::getDefaultFlakeAttrPathPrefixes(); - res.emplace_front("devShells." + settings.thisSystem.get() + "."); - return res; + return {"nix-develop"}; } StorePath getShellOutPath(ref store, ref installable) @@ -653,9 +642,9 @@ struct CmdDevelop : Common, MixEnvironment std::move(nixpkgs), "bashInteractive", ExtendedOutputsSpec::Default(), - Strings{}, - Strings{"legacyPackages." + settings.thisSystem.get() + "."}, - nixpkgsLockFlags); + StringSet{"nix-build"}, + nixpkgsLockFlags, + std::nullopt); for (auto & path : Installable::toStorePathSet( getEvalStore(), store, Realise::Outputs, OperateOn::Output, {bashInstallable})) { diff --git a/src/nix/diff-closures.cc b/src/nix/diff-closures.cc index d36a21d746ff..a71efa042e84 100644 --- a/src/nix/diff-closures.cc +++ b/src/nix/diff-closures.cc @@ -54,10 +54,10 @@ GroupedPaths getClosureInfo(ref store, const StorePath & toplevel) std::string showVersions(const StringSet & versions) { if (versions.empty()) - return "∅"; + return "(absent)"; StringSet versions2; for (auto & version : versions) - versions2.insert(version.empty() ? "ε" : version); + versions2.insert(version.empty() ? 
"(no version)" : version); return concatStringsSep(", ", versions2); } @@ -104,8 +104,13 @@ void printClosureDiff( if (showDelta || !removed.empty() || !added.empty()) { std::vector items; - if (!removed.empty() || !added.empty()) + if (!removed.empty() && !added.empty()) { items.push_back(fmt("%s → %s", showVersions(removed), showVersions(added))); + } else if (!removed.empty()) { + items.push_back(fmt("%s removed", showVersions(removed))); + } else if (!added.empty()) { + items.push_back(fmt("%s added", showVersions(added))); + } if (showDelta) items.push_back(fmt("%s%s" ANSI_NORMAL, sizeDelta > 0 ? ANSI_RED : ANSI_GREEN, renderSize(sizeDelta))); logger->cout("%s%s: %s", indent, name, concatStringsSep(", ", items)); diff --git a/src/nix/diff-closures.md b/src/nix/diff-closures.md index 0294c0d8def7..6b07af28f958 100644 --- a/src/nix/diff-closures.md +++ b/src/nix/diff-closures.md @@ -11,8 +11,8 @@ R""( baloo-widgets: 20.08.1 → 20.08.2 bluez-qt: +12.6 KiB dolphin: 20.08.1 → 20.08.2, +13.9 KiB - kdeconnect: 20.08.2 → ∅, -6597.8 KiB - kdeconnect-kde: ∅ → 20.08.2, +6599.7 KiB + kdeconnect: 20.08.2 removed, -6597.8 KiB + kdeconnect-kde: 20.08.2 added, +6599.7 KiB … ``` @@ -34,9 +34,9 @@ dolphin: 20.08.1 → 20.08.2, +13.9 KiB No size change is shown if it's below the threshold. If the package does not exist in either the *before* or *after* closures, it is -represented using `∅` (empty set) on the appropriate side of the -arrow. If a package has an empty version string, the version is -rendered as `ε` (epsilon). +represented using `added` or `removed`. +If a package has an empty version string, the version is +rendered as `(no version)`. There may be multiple versions of a package in each closure. In that case, only the changed versions are shown. 
Thus, diff --git a/src/nix/eval.cc b/src/nix/eval.cc index f41d98f1a8d0..db23382003ea 100644 --- a/src/nix/eval.cc +++ b/src/nix/eval.cc @@ -112,11 +112,14 @@ struct CmdEval : MixJSON, InstallableValueCommand, MixReadOnlyOption logger->stop(); writeFull( getStandardOutput(), - *state->coerceToString(noPos, *v, context, "while generating the eval command output")); + state->devirtualize( + *state->coerceToString(noPos, *v, context, "while generating the eval command output"), context)); } else if (json) { - printJSON(printValueAsJSON(*state, true, *v, pos, context, false)); + // FIXME: use printJSON + auto j = printValueAsJSON(*state, true, *v, pos, context, false); + logger->cout("%s", state->devirtualize(outputPretty ? j.dump(2) : j.dump(), context)); } else { diff --git a/src/nix/flake-check.md b/src/nix/flake-check.md index 007640c27c94..a700c5cbe6fe 100644 --- a/src/nix/flake-check.md +++ b/src/nix/flake-check.md @@ -18,66 +18,20 @@ R""( # Description This command verifies that the flake specified by flake reference -*flake-url* can be evaluated successfully (as detailed below), and -that the derivations specified by the flake's `checks` output can be -built successfully. +*flake-url* can be evaluated and built successfully according to its +`schemas` flake output. For every flake output that has a schema +definition, `nix flake check` uses the schema to extract the contents +of the output. Then, for every item in the contents: + +* It evaluates the elements of the `evalChecks` attribute set returned + by the schema for that item, printing an error or warning for every + check that fails to evaluate or that evaluates to `false`. + +* It builds the `derivation` attribute returned by the schema for that + item, if the item has the `isFlakeCheck` attribute. If the `keep-going` option is set to `true`, Nix will keep evaluating as much as it can and report the errors as it encounters them. Otherwise it will stop at the first error. 
-# Evaluation checks - -The following flake output attributes must be derivations: - -* `checks.`*system*`.`*name* -* `devShells.`*system*`.default` -* `devShells.`*system*`.`*name* -* `nixosConfigurations.`*name*`.config.system.build.toplevel` -* `packages.`*system*`.default` -* `packages.`*system*`.`*name* - -The following flake output attributes must be [app -definitions](./nix3-run.md): - -* `apps.`*system*`.default` -* `apps.`*system*`.`*name* - -The following flake output attributes must be [template -definitions](./nix3-flake-init.md): - -* `templates.default` -* `templates.`*name* - -The following flake output attributes must be *Nixpkgs overlays*: - -* `overlays.default` -* `overlays.`*name* - -The following flake output attributes must be *NixOS modules*: - -* `nixosModules.default` -* `nixosModules.`*name* - -The following flake output attributes must be -[bundlers](./nix3-bundle.md): - -* `bundlers.default` -* `bundlers.`*name* - -Old default attributes are renamed, they will work but will emit a warning: - -* `defaultPackage.` → `packages.`*system*`.default` -* `defaultApps.` → `apps.`*system*`.default` -* `defaultTemplate` → `templates.default` -* `defaultBundler.` → `bundlers.`*system*`.default` -* `overlay` → `overlays.default` -* `devShell.` → `devShells.`*system*`.default` -* `nixosModule` → `nixosModules.default` - -In addition, the `hydraJobs` output is evaluated in the same way as -Hydra's `hydra-eval-jobs` (i.e. as a arbitrarily deeply nested -attribute set of derivations). Similarly, the -`legacyPackages`.*system* output is evaluated like `nix-env --query --available `. 
- )"" diff --git a/src/nix/flake-prefetch-inputs.cc b/src/nix/flake-prefetch-inputs.cc index 4ea6342c3695..19fbb0b574bb 100644 --- a/src/nix/flake-prefetch-inputs.cc +++ b/src/nix/flake-prefetch-inputs.cc @@ -43,11 +43,14 @@ struct CmdFlakePrefetchInputs : FlakeCommand return; if (auto lockedNode = dynamic_cast(&node)) { + if (lockedNode->buildTime) + return; try { Activity act(*logger, lvlInfo, actUnknown, fmt("fetching '%s'", lockedNode->lockedRef)); auto accessor = lockedNode->lockedRef.input.getAccessor(fetchSettings, *store).first; - fetchToStore( - fetchSettings, *store, accessor, FetchMode::Copy, lockedNode->lockedRef.input.getName()); + if (!evalSettings.lazyTrees) + fetchToStore( + fetchSettings, *store, accessor, FetchMode::Copy, lockedNode->lockedRef.input.getName()); } catch (Error & e) { printError("%s", e.what()); nrFailed++; diff --git a/src/nix/flake-prefetch-inputs.md b/src/nix/flake-prefetch-inputs.md index a69f7d367915..b571fa348370 100644 --- a/src/nix/flake-prefetch-inputs.md +++ b/src/nix/flake-prefetch-inputs.md @@ -12,6 +12,6 @@ R""( Fetch the inputs of a flake. This ensures that they are already available for any subsequent evaluation of the flake. -This operation is recursive: it will fetch not just the direct inputs of the top-level flake, but also transitive inputs. +This operation is recursive: it fetches not just the direct inputs of the top-level flake, but also transitive inputs. It skips build-time inputs, i.e. inputs that have the attribute `buildTime = true`. 
)"" diff --git a/src/nix/flake.cc b/src/nix/flake.cc index df2a3a9114a0..f50a2b1bc305 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -20,6 +20,10 @@ #include "nix/fetchers/fetch-to-store.hh" #include "nix/store/local-fs-store.hh" #include "nix/store/globals.hh" +#include "nix/expr/parallel-eval.hh" +#include "nix/util/exit.hh" +#include "nix/cmd/flake-schemas.hh" +#include "nix/store/names.hh" #include #include @@ -134,6 +138,7 @@ struct CmdFlakeUpdate : FlakeCommand lockFlags.recreateLockFile = updateAll; lockFlags.writeLockFile = true; lockFlags.applyNixConfig = true; + lockFlags.requireLockable = false; lockFlake(); } @@ -166,38 +171,12 @@ struct CmdFlakeLock : FlakeCommand lockFlags.writeLockFile = true; lockFlags.failOnUnlocked = true; lockFlags.applyNixConfig = true; + lockFlags.requireLockable = false; lockFlake(); } }; -static void enumerateOutputs( - EvalState & state, - Value & vFlake, - std::function callback) -{ - auto pos = vFlake.determinePos(noPos); - state.forceAttrs(vFlake, pos, "while evaluating a flake to get its outputs"); - - auto aOutputs = vFlake.attrs()->get(state.symbols.create("outputs")); - assert(aOutputs); - - state.forceAttrs(*aOutputs->value, pos, "while evaluating the outputs of a flake"); - - auto sHydraJobs = state.symbols.create("hydraJobs"); - - /* Hack: ensure that hydraJobs is evaluated before anything - else. This way we can disable IFD for hydraJobs and then enable - it for other outputs. 
*/ - if (auto attr = aOutputs->value->attrs()->get(sHydraJobs)) - callback(state.symbols[attr->name], *attr->value, attr->pos); - - for (auto & attr : *aOutputs->value->attrs()) { - if (attr.name != sHydraJobs) - callback(state.symbols[attr.name], *attr.value, attr.pos); - } -} - struct CmdFlakeMetadata : FlakeCommand, MixJSON { std::string description() override @@ -214,11 +193,17 @@ struct CmdFlakeMetadata : FlakeCommand, MixJSON void run(nix::ref store) override { + lockFlags.requireLockable = false; auto lockedFlake = lockFlake(); auto & flake = lockedFlake.flake; - // Currently, all flakes are in the Nix store via the rootFS accessor. - auto storePath = store->printStorePath(store->toStorePath(flake.path.path.abs()).first); + /* Hack to show the store path if available. */ + std::optional storePath; + if (store->isInStore(flake.path.path.abs())) { + auto path = store->toStorePath(flake.path.path.abs()).first; + if (store->isValidPath(path)) + storePath = path; + } if (json) { nlohmann::json j; @@ -240,7 +225,8 @@ struct CmdFlakeMetadata : FlakeCommand, MixJSON j["revCount"] = *revCount; if (auto lastModified = flake.lockedRef.input.getLastModified()) j["lastModified"] = *lastModified; - j["path"] = storePath; + if (storePath) + j["path"] = store->printStorePath(*storePath); j["locks"] = lockedFlake.lockFile.toJSON().first; if (auto fingerprint = lockedFlake.getFingerprint(*store, fetchSettings)) j["fingerprint"] = fingerprint->to_string(HashFormat::Base16, false); @@ -251,7 +237,8 @@ struct CmdFlakeMetadata : FlakeCommand, MixJSON logger->cout(ANSI_BOLD "Locked URL:" ANSI_NORMAL " %s", flake.lockedRef.to_string()); if (flake.description) logger->cout(ANSI_BOLD "Description:" ANSI_NORMAL " %s", *flake.description); - logger->cout(ANSI_BOLD "Path:" ANSI_NORMAL " %s", storePath); + if (storePath) + logger->cout(ANSI_BOLD "Path:" ANSI_NORMAL " %s", store->printStorePath(*storePath)); if (auto rev = flake.lockedRef.input.getRev()) logger->cout(ANSI_BOLD "Revision:" 
ANSI_NORMAL " %s", rev->to_string(HashFormat::Base16, false)); if (auto dirtyRev = fetchers::maybeGetStrAttr(flake.lockedRef.toAttrs(), "dirtyRev")) @@ -283,7 +270,7 @@ struct CmdFlakeMetadata : FlakeCommand, MixJSON "%s" ANSI_BOLD "%s" ANSI_NORMAL ": %s%s", prefix + (last ? treeLast : treeConn), input.first, - (*lockedNode)->lockedRef, + (*lockedNode)->lockedRef.to_string(true), lastModifiedStr); bool firstVisit = visited.insert(*lockedNode).second; @@ -312,9 +299,26 @@ struct CmdFlakeInfo : CmdFlakeMetadata } }; -struct CmdFlakeCheck : FlakeCommand +/** + * Log the current exception, after forcing cached evaluation errors. + */ +static void logEvalError() +{ + try { + try { + throw; + } catch (eval_cache::CachedEvalError & e) { + e.force(); + } + } catch (Error & e) { + logError(e.info()); + } +} + +struct CmdFlakeCheck : FlakeCommand, MixFlakeSchemas { bool build = true; + bool buildAll = false; bool checkAllSystems = false; CmdFlakeCheck() @@ -324,6 +328,11 @@ struct CmdFlakeCheck : FlakeCommand .description = "Do not build checks.", .handler = {&build, false}, }); + addFlag({ + .longName = "build-all", + .description = "Build all derivations, not just checks.", + .handler = {&buildAll, true}, + }); addFlag({ .longName = "all-systems", .description = "Check the outputs for all systems.", @@ -353,441 +362,126 @@ struct CmdFlakeCheck : FlakeCommand auto state = getEvalState(); lockFlags.applyNixConfig = true; - auto flake = lockFlake(); + auto flake = std::make_shared(lockFlake()); auto localSystem = std::string(settings.thisSystem.get()); - bool hasErrors = false; - auto reportError = [&](const Error & e) { - try { - throw e; - } catch (Interrupted & e) { - throw; - } catch (Error & e) { - if (settings.getWorkerSettings().keepGoing) { - logError(e.info()); - hasErrors = true; - } else - throw; - } - }; + auto cache = flake_schemas::call(*state, flake, getDefaultFlakeSchemas()); - StringSet omittedSystems; + auto inventory = 
cache->getRoot()->getAttr("inventory"); + auto outputs = cache->getRoot()->getAttr("outputs"); - // FIXME: rewrite to use EvalCache. + FutureVector futures(*state->executor); - auto resolve = [&](PosIdx p) { return state->positions[p]; }; + Sync> drvPaths_; + Sync> uncheckedOutputs; + Sync> omittedSystems; + Sync>> derivedPathToAttrPaths_; - auto argHasName = [&](Symbol arg, std::string_view expected) { - std::string_view name = state->symbols[arg]; - return name == expected || name == "_" || (hasPrefix(name, "_") && name.substr(1) == expected); - }; + std::function node)> visit; - auto checkSystemName = [&](std::string_view system, const PosIdx pos) { - // FIXME: what's the format of "system"? - if (system.find('-') == std::string::npos) - reportError(Error("'%s' is not a valid system type, at %s", system, resolve(pos))); - }; + std::atomic_bool hasErrors = false; - auto checkSystemType = [&](std::string_view system, const PosIdx pos) { - if (!checkAllSystems && system != localSystem) { - omittedSystems.insert(std::string(system)); - return false; - } else { - return true; - } - }; + visit = [&](ref node) { + flake_schemas::visit( + checkAllSystems ? std::optional() : localSystem, + false, // FIXME: add a --legacy flag? 
+ node, + flake->flake.provenance, - auto checkDerivation = - [&](const std::string & attrPath, Value & v, const PosIdx pos) -> std::optional { - try { - Activity act(*logger, lvlInfo, actUnknown, fmt("checking derivation %s", attrPath)); - auto packageInfo = getDerivation(*state, v, false); - if (!packageInfo) - throw Error("flake attribute '%s' is not a derivation", attrPath); - else { - // FIXME: check meta attributes - auto storePath = packageInfo->queryDrvPath(); - if (storePath) { - logger->log( - lvlInfo, fmt("derivation evaluated to %s", store->printStorePath(storePath.value()))); - } - return storePath; - } - } catch (Error & e) { - e.addTrace(resolve(pos), HintFmt("while checking the derivation '%s'", attrPath)); - reportError(e); - } - return std::nullopt; - }; - - std::map> attrPathsByDrv; - - auto checkApp = [&](const std::string & attrPath, Value & v, const PosIdx pos) { - try { - Activity act(*logger, lvlInfo, actUnknown, fmt("checking app '%s'", attrPath)); - state->forceAttrs(v, pos, ""); - if (auto attr = v.attrs()->get(state->symbols.create("type"))) - state->forceStringNoCtx(*attr->value, attr->pos, ""); - else - throw Error("app '%s' lacks attribute 'type'", attrPath); - - if (auto attr = v.attrs()->get(state->symbols.create("program"))) { - if (attr->name == state->symbols.create("program")) { - NixStringContext context; - state->forceString(*attr->value, context, attr->pos, ""); - } - } else - throw Error("app '%s' lacks attribute 'program'", attrPath); - - if (auto attr = v.attrs()->get(state->symbols.create("meta"))) { - state->forceAttrs(*attr->value, attr->pos, ""); - if (auto dAttr = attr->value->attrs()->get(state->symbols.create("description"))) - state->forceStringNoCtx(*dAttr->value, dAttr->pos, ""); - else - logWarning({ - .msg = HintFmt("app '%s' lacks attribute 'meta.description'", attrPath), - }); - } else - logWarning({ - .msg = HintFmt("app '%s' lacks attribute 'meta'", attrPath), - }); - - for (auto & attr : *v.attrs()) { - 
std::string_view name(state->symbols[attr.name]); - if (name != "type" && name != "program" && name != "meta") - throw Error("app '%s' has unsupported attribute '%s'", attrPath, name); - } - } catch (Error & e) { - e.addTrace(resolve(pos), HintFmt("while checking the app definition '%s'", attrPath)); - reportError(e); - } - }; - - auto checkOverlay = [&](std::string_view attrPath, Value & v, const PosIdx pos) { - try { - Activity act(*logger, lvlInfo, actUnknown, fmt("checking overlay '%s'", attrPath)); - state->forceValue(v, pos); - if (!v.isLambda()) { - throw Error("overlay is not a function, but %s instead", showType(v)); - } - if (v.lambda().fun->getFormals() || !argHasName(v.lambda().fun->arg, "final")) - throw Error("overlay does not take an argument named 'final'"); - // FIXME: if we have a 'nixpkgs' input, use it to - // evaluate the overlay. - } catch (Error & e) { - e.addTrace(resolve(pos), HintFmt("while checking the overlay '%s'", attrPath)); - reportError(e); - } - }; - - auto checkModule = [&](std::string_view attrPath, Value & v, const PosIdx pos) { - try { - Activity act(*logger, lvlInfo, actUnknown, fmt("checking NixOS module '%s'", attrPath)); - state->forceValue(v, pos); - } catch (Error & e) { - e.addTrace(resolve(pos), HintFmt("while checking the NixOS module '%s'", attrPath)); - reportError(e); - } - }; - - std::function checkHydraJobs; - - checkHydraJobs = [&](std::string_view attrPath, Value & v, const PosIdx pos) { - try { - Activity act(*logger, lvlInfo, actUnknown, fmt("checking Hydra job '%s'", attrPath)); - state->forceAttrs(v, pos, ""); - - if (state->isDerivation(v)) - throw Error("jobset should not be a derivation at top-level"); - - for (auto & attr : *v.attrs()) { - state->forceAttrs(*attr.value, attr.pos, ""); - auto attrPath2 = concatStrings(attrPath, ".", state->symbols[attr.name]); - if (state->isDerivation(*attr.value)) { - Activity act(*logger, lvlInfo, actUnknown, fmt("checking Hydra job '%s'", attrPath2)); - 
checkDerivation(attrPath2, *attr.value, attr.pos); - } else - checkHydraJobs(attrPath2, *attr.value, attr.pos); - } - - } catch (Error & e) { - e.addTrace(resolve(pos), HintFmt("while checking the Hydra jobset '%s'", attrPath)); - reportError(e); - } - }; - - auto checkNixOSConfiguration = [&](const std::string & attrPath, Value & v, const PosIdx pos) { - try { - Activity act(*logger, lvlInfo, actUnknown, fmt("checking NixOS configuration '%s'", attrPath)); - Bindings & bindings = Bindings::emptyBindings; - auto vToplevel = findAlongAttrPath(*state, "config.system.build.toplevel", bindings, v).first; - state->forceValue(*vToplevel, pos); - if (!state->isDerivation(*vToplevel)) - throw Error("attribute 'config.system.build.toplevel' is not a derivation"); - } catch (Error & e) { - e.addTrace(resolve(pos), HintFmt("while checking the NixOS configuration '%s'", attrPath)); - reportError(e); - } - }; - - auto checkTemplate = [&](std::string_view attrPath, Value & v, const PosIdx pos) { - try { - Activity act(*logger, lvlInfo, actUnknown, fmt("checking template '%s'", attrPath)); - - state->forceAttrs(v, pos, ""); - - if (auto attr = v.attrs()->get(state->symbols.create("path"))) { - if (attr->name == state->symbols.create("path")) { - NixStringContext context; - auto path = state->coerceToPath(attr->pos, *attr->value, context, ""); - if (!path.pathExists()) - throw Error("template '%s' refers to a non-existent path '%s'", attrPath, path); - // TODO: recursively check the flake in 'path'. 
- } - } else - throw Error("template '%s' lacks attribute 'path'", attrPath); - - if (auto attr = v.attrs()->get(state->symbols.create("description"))) - state->forceStringNoCtx(*attr->value, attr->pos, ""); - else - throw Error("template '%s' lacks attribute 'description'", attrPath); - - for (auto & attr : *v.attrs()) { - std::string_view name(state->symbols[attr.name]); - if (name != "path" && name != "description" && name != "welcomeText") - throw Error("template '%s' has unsupported attribute '%s'", attrPath, name); - } - } catch (Error & e) { - e.addTrace(resolve(pos), HintFmt("while checking the template '%s'", attrPath)); - reportError(e); - } - }; - - auto checkBundler = [&](const std::string & attrPath, Value & v, const PosIdx pos) { - try { - Activity act(*logger, lvlInfo, actUnknown, fmt("checking bundler '%s'", attrPath)); - state->forceValue(v, pos); - if (!v.isLambda()) - throw Error("bundler must be a function"); - // TODO: check types of inputs/outputs? - } catch (Error & e) { - e.addTrace(resolve(pos), HintFmt("while checking the template '%s'", attrPath)); - reportError(e); - } - }; - - { - Activity act(*logger, lvlInfo, actUnknown, "evaluating flake"); - - auto vFlake = state->allocValue(); - flake::callFlake(*state, flake, *vFlake); - - enumerateOutputs(*state, *vFlake, [&](std::string_view name, Value & vOutput, const PosIdx pos) { - Activity act(*logger, lvlInfo, actUnknown, fmt("checking flake output '%s'", name)); - - try { - evalSettings.enableImportFromDerivation.setDefault(name != "hydraJobs"); - - state->forceValue(vOutput, pos); - - std::string_view replacement = name == "defaultPackage" ? "packages..default" - : name == "defaultApp" ? "apps..default" - : name == "defaultTemplate" ? "templates.default" - : name == "defaultBundler" ? "bundlers..default" - : name == "overlay" ? "overlays.default" - : name == "devShell" ? "devShells..default" - : name == "nixosModule" ? 
"nixosModules.default" - : ""; - if (replacement != "") - warn("flake output attribute '%s' is deprecated; use '%s' instead", name, replacement); - - if (name == "checks") { - state->forceAttrs(vOutput, pos, ""); - for (auto & attr : *vOutput.attrs()) { - std::string_view attr_name = state->symbols[attr.name]; - checkSystemName(attr_name, attr.pos); - if (checkSystemType(attr_name, attr.pos)) { - state->forceAttrs(*attr.value, attr.pos, ""); - for (auto & attr2 : *attr.value->attrs()) { - auto drvPath = checkDerivation( - fmt("%s.%s.%s", name, attr_name, state->symbols[attr2.name]), - *attr2.value, - attr2.pos); - if (drvPath && attr_name == settings.thisSystem.get()) { - auto path = DerivedPath::Built{ - .drvPath = makeConstantStorePathRef(*drvPath), - .outputs = OutputsSpec::All{}, - }; - - // Build and store the attribute path for error reporting - AttrPath attrPath{state->symbols.create(name), attr.name, attr2.name}; - attrPathsByDrv[path].push_back(std::move(attrPath)); - } - } + [&](const flake_schemas::Leaf & leaf) { + try { + bool done = true; + bool buildSkipped = false; + + if (auto evalChecks = leaf.node->maybeGetAttr("evalChecks")) { + auto checkNames = evalChecks->getAttrs(); + for (auto & checkName : checkNames) { + auto cursor = evalChecks->getAttr(checkName); + Activity act( + *logger, + lvlInfo, + actUnknown, + fmt("running flake check '%s'", cursor->getAttrPathStr())); + auto b = cursor->getBool(); + if (!b) + throw Error("Evaluation check '%s' failed.", cursor->getAttrPathStr()); } } - } - - else if (name == "formatter") { - state->forceAttrs(vOutput, pos, ""); - for (auto & attr : *vOutput.attrs()) { - const auto & attr_name = state->symbols[attr.name]; - checkSystemName(attr_name, attr.pos); - if (checkSystemType(attr_name, attr.pos)) { - checkDerivation(fmt("%s.%s", name, attr_name), *attr.value, attr.pos); - }; - } - } - - else if (name == "packages" || name == "devShells") { - state->forceAttrs(vOutput, pos, ""); - for (auto & attr : 
*vOutput.attrs()) { - const auto & attr_name = state->symbols[attr.name]; - checkSystemName(attr_name, attr.pos); - if (checkSystemType(attr_name, attr.pos)) { - state->forceAttrs(*attr.value, attr.pos, ""); - for (auto & attr2 : *attr.value->attrs()) - checkDerivation( - fmt("%s.%s.%s", name, attr_name, state->symbols[attr2.name]), - *attr2.value, - attr2.pos); - }; - } - } - - else if (name == "apps") { - state->forceAttrs(vOutput, pos, ""); - for (auto & attr : *vOutput.attrs()) { - const auto & attr_name = state->symbols[attr.name]; - checkSystemName(attr_name, attr.pos); - if (checkSystemType(attr_name, attr.pos)) { - state->forceAttrs(*attr.value, attr.pos, ""); - for (auto & attr2 : *attr.value->attrs()) - checkApp( - fmt("%s.%s.%s", name, attr_name, state->symbols[attr2.name]), - *attr2.value, - attr2.pos); - }; - } - } - - else if (name == "defaultPackage" || name == "devShell") { - state->forceAttrs(vOutput, pos, ""); - for (auto & attr : *vOutput.attrs()) { - const auto & attr_name = state->symbols[attr.name]; - checkSystemName(attr_name, attr.pos); - if (checkSystemType(attr_name, attr.pos)) { - checkDerivation(fmt("%s.%s", name, attr_name), *attr.value, attr.pos); - }; - } - } - else if (name == "defaultApp") { - state->forceAttrs(vOutput, pos, ""); - for (auto & attr : *vOutput.attrs()) { - const auto & attr_name = state->symbols[attr.name]; - checkSystemName(attr_name, attr.pos); - if (checkSystemType(attr_name, attr.pos)) { - checkApp(fmt("%s.%s", name, attr_name), *attr.value, attr.pos); - }; + if (auto drv = leaf.derivation(outputs)) { + + /* Check whether this is a valid derivation. 
*/ + if (!drv->maybeGetAttr("drvPath") || drv->getAttr("type")->getString() != "derivation") + throw Error("Flake output '%s' is not a derivation.", drv->getAttrPathStr()); + + DrvName parsedDrvName(drv->getAttr("name")->getString()); + + if (buildAll || leaf.isFlakeCheck()) { + auto drvPath = drv->forceDerivation(); + auto derivedPath = DerivedPath::Built{ + .drvPath = makeConstantStorePathRef(drvPath), + .outputs = OutputsSpec::All{}, + }; + (*derivedPathToAttrPaths_.lock())[derivedPath].push_back(leaf.node->getAttrPath()); + drvPaths_.lock()->push_back(std::move(derivedPath)); + if (build) + done = false; + } else + buildSkipped = true; } - } - else if (name == "legacyPackages") { - state->forceAttrs(vOutput, pos, ""); - for (auto & attr : *vOutput.attrs()) { - checkSystemName(state->symbols[attr.name], attr.pos); - checkSystemType(state->symbols[attr.name], attr.pos); - // FIXME: do getDerivations? - } - } - - else if (name == "overlay") - checkOverlay(name, vOutput, pos); - - else if (name == "overlays") { - state->forceAttrs(vOutput, pos, ""); - for (auto & attr : *vOutput.attrs()) - checkOverlay(fmt("%s.%s", name, state->symbols[attr.name]), *attr.value, attr.pos); - } - - else if (name == "nixosModule") - checkModule(name, vOutput, pos); - - else if (name == "nixosModules") { - state->forceAttrs(vOutput, pos, ""); - for (auto & attr : *vOutput.attrs()) - checkModule(fmt("%s.%s", name, state->symbols[attr.name]), *attr.value, attr.pos); - } - - else if (name == "nixosConfigurations") { - state->forceAttrs(vOutput, pos, ""); - for (auto & attr : *vOutput.attrs()) - checkNixOSConfiguration( - fmt("%s.%s", name, state->symbols[attr.name]), *attr.value, attr.pos); + if (done) + notice( + "✅ " ANSI_BOLD "%s" ANSI_NORMAL "%s", + leaf.node->getAttrPathStr(), + buildSkipped ? 
ANSI_ITALIC ANSI_FAINT " (build skipped)" : ""); + } catch (Interrupted & e) { + throw; + } catch (Error & e) { + printError("❌ " ANSI_RED "%s" ANSI_NORMAL, leaf.node->getAttrPathStr()); + if (settings.getWorkerSettings().keepGoing) { + logEvalError(); + hasErrors = true; + } else + throw; } + }, - else if (name == "hydraJobs") - checkHydraJobs(name, vOutput, pos); + [&](std::function forEachChild) { + forEachChild([&](Symbol attrName, ref node, bool isLast) { + state->spawn(futures, 2, [&visit, node]() { visit(node); }); + }); + }, - else if (name == "defaultTemplate") - checkTemplate(name, vOutput, pos); + [&](ref node, const std::vector & systems) { + for (auto & s : systems) + omittedSystems.lock()->insert(s); + }, - else if (name == "templates") { - state->forceAttrs(vOutput, pos, ""); - for (auto & attr : *vOutput.attrs()) - checkTemplate(fmt("%s.%s", name, state->symbols[attr.name]), *attr.value, attr.pos); - } - - else if (name == "defaultBundler") { - state->forceAttrs(vOutput, pos, ""); - for (auto & attr : *vOutput.attrs()) { - const auto & attr_name = state->symbols[attr.name]; - checkSystemName(attr_name, attr.pos); - if (checkSystemType(attr_name, attr.pos)) { - checkBundler(fmt("%s.%s", name, attr_name), *attr.value, attr.pos); - }; - } - } + [&](ref node) {}); + }; - else if (name == "bundlers") { - state->forceAttrs(vOutput, pos, ""); - for (auto & attr : *vOutput.attrs()) { - const auto & attr_name = state->symbols[attr.name]; - checkSystemName(attr_name, attr.pos); - if (checkSystemType(attr_name, attr.pos)) { - state->forceAttrs(*attr.value, attr.pos, ""); - for (auto & attr2 : *attr.value->attrs()) { - checkBundler( - fmt("%s.%s.%s", name, attr_name, state->symbols[attr2.name]), - *attr2.value, - attr2.pos); - } - }; - } - } + flake_schemas::forEachOutput( + inventory, + [&](Symbol outputName, + std::shared_ptr output, + const std::string & doc, + bool isLast) { + if (output) + state->spawn(futures, 1, [&visit, output(ref(output))]() { 
visit(output); }); + else + uncheckedOutputs.lock()->insert(std::string(state->symbols[outputName])); + }); - else if ( - name == "lib" || name == "darwinConfigurations" || name == "darwinModules" - || name == "flakeModule" || name == "flakeModules" || name == "herculesCI" - || name == "homeConfigurations" || name == "homeModule" || name == "homeModules" - || name == "nixopsConfigurations") - // Known but unchecked community attribute - ; + futures.finishAll(); - else - warn("unknown flake output '%s'", name); + if (!uncheckedOutputs.lock()->empty()) + warn("The following flake outputs are unchecked: %s.", concatStringsSep(", ", *uncheckedOutputs.lock())); - } catch (Error & e) { - e.addTrace(resolve(pos), HintFmt("while checking flake output '%s'", name)); - reportError(e); - } - }); - } + auto drvPaths(drvPaths_.lock()); + auto derivedPathToAttrPaths(derivedPathToAttrPaths_.lock()); - if (build && !attrPathsByDrv.empty()) { - auto keys = std::views::keys(attrPathsByDrv); - std::vector drvPaths(keys.begin(), keys.end()); + if (build && !drvPaths->empty()) { // TODO: This filtering of substitutable paths is a temporary workaround until // https://github.com/NixOS/nix/issues/5025 (union stores) is implemented. // @@ -799,61 +493,71 @@ struct CmdFlakeCheck : FlakeCommand // For now, we skip building derivations whose outputs are already available // via substitution, as `nix flake check` only needs to verify buildability, // not actually produce the outputs. 
- auto missing = store->queryMissing(drvPaths); + state->waitForAllPaths(); + auto missing = store->queryMissing(*drvPaths); std::vector toBuild; + std::set toBuildSet; for (auto & path : missing.willBuild) { - toBuild.emplace_back( - DerivedPath::Built{ - .drvPath = makeConstantStorePathRef(path), - .outputs = OutputsSpec::All{}, - }); + auto derivedPath = DerivedPath::Built{ + .drvPath = makeConstantStorePathRef(path), + .outputs = OutputsSpec::All{}, + }; + toBuild.emplace_back(derivedPath); + toBuildSet.insert(std::move(derivedPath)); } + for (auto & [derivedPath, attrPaths] : *derivedPathToAttrPaths) + if (!toBuildSet.contains(derivedPath)) + for (auto & attrPath : attrPaths) + notice( + "✅ " ANSI_BOLD "%s" ANSI_NORMAL ANSI_ITALIC ANSI_FAINT " (previously built)" ANSI_NORMAL, + attrPath.to_string(*state)); + + // FIXME: should start building while evaluating. Activity act(*logger, lvlInfo, actUnknown, fmt("running %d flake checks", toBuild.size())); - auto results = store->buildPathsWithResults(toBuild); - - // Report build failures with attribute paths - for (auto & result : results) { - if (auto * failure = result.tryGetFailure()) { - auto it = attrPathsByDrv.find(result.path); - if (it != attrPathsByDrv.end() && !it->second.empty()) { - for (auto & attrPath : it->second) { - reportError(Error( - "failed to build attribute '%s', build of '%s' failed: %s", - attrPath.to_string(*state), - result.path.to_string(*store), - failure->message())); - } - } else { - // Derivation has no attribute path (e.g., a build dependency) - reportError( - Error("build of '%s' failed: %s", result.path.to_string(*store), failure->message())); + + auto buildResults = store->buildPathsWithResults(toBuild); + assert(buildResults.size() == toBuild.size()); + + for (auto & buildResult : buildResults) { + if (auto failure = buildResult.tryGetFailure()) + try { + hasErrors = true; + for (auto & attrPath : (*derivedPathToAttrPaths)[buildResult.path]) + if (failure->status == 
BuildResult::Failure::Cancelled) + notice( + "❓ " ANSI_BOLD "%s" ANSI_NORMAL ANSI_FAINT " (cancelled)", + attrPath.to_string(*state)); + else + printError("❌ " ANSI_RED "%s" ANSI_NORMAL, attrPath.to_string(*state)); + if (failure->status != BuildResult::Failure::Cancelled) + throw *failure; + } catch (Error & e) { + logError(e.info()); } - } + else + for (auto & attrPath : (*derivedPathToAttrPaths)[buildResult.path]) + notice("✅ " ANSI_BOLD "%s" ANSI_NORMAL, attrPath.to_string(*state)); } } - if (hasErrors) - throw Error("some errors were encountered during the evaluation"); - logger->log(lvlInfo, ANSI_GREEN "all checks passed!" ANSI_NORMAL); - - if (!omittedSystems.empty()) { + if (!omittedSystems.lock()->empty()) { // TODO: empty system is not visible; render all as nix strings? warn( "The check omitted these incompatible systems: %s\n" "Use '--all-systems' to check all.", - concatStringsSep(", ", omittedSystems)); - }; + concatStringsSep(", ", *omittedSystems.lock())); + } + + if (hasErrors) + throw Exit(1); }; }; -static Strings defaultTemplateAttrPathsPrefixes{"templates."}; -static Strings defaultTemplateAttrPaths = {"templates.default", "defaultTemplate"}; - -struct CmdFlakeInitCommon : virtual Args, EvalCommand +struct CmdFlakeInitCommon : virtual Args, EvalCommand, MixFlakeSchemas { - std::string templateUrl = "templates"; + std::string templateUrl = "https://flakehub.com/f/DeterminateSystems/flake-templates/0.1"; std::filesystem::path destDir; const LockFlags lockFlags{.writeLockFile = false}; @@ -867,13 +571,7 @@ struct CmdFlakeInitCommon : virtual Args, EvalCommand .labels = {"template"}, .handler = {&templateUrl}, .completer = {[&](AddCompletions & completions, size_t, std::string_view prefix) { - completeFlakeRefWithFragment( - completions, - getEvalState(), - lockFlags, - defaultTemplateAttrPathsPrefixes, - defaultTemplateAttrPaths, - prefix); + completeFlakeRefWithFragment(completions, getEvalState(), lockFlags, {"nix-template"}, prefix); }}, }); } 
@@ -893,9 +591,9 @@ struct CmdFlakeInitCommon : virtual Args, EvalCommand std::move(templateFlakeRef), templateName, ExtendedOutputsSpec::Default(), - defaultTemplateAttrPaths, - defaultTemplateAttrPathsPrefixes, - lockFlags); + {"nix-template"}, + lockFlags, + {}); auto cursor = installable.getCursor(*evalState); @@ -1092,7 +790,8 @@ struct CmdFlakeArchive : FlakeCommand, MixJSON, MixDryRun, MixNoCheckSigs StorePathSet sources; - auto storePath = store->toStorePath(flake.flake.path.path.abs()).first; + auto storePath = dryRun ? flake.flake.lockedRef.input.computeStorePath(*store) + : std::get(flake.flake.lockedRef.input.fetchToStore(fetchSettings, *store)); sources.insert(storePath); @@ -1105,7 +804,8 @@ struct CmdFlakeArchive : FlakeCommand, MixJSON, MixDryRun, MixNoCheckSigs std::optional storePath; if (!(*inputNode)->lockedRef.input.isRelative()) { storePath = dryRun ? (*inputNode)->lockedRef.input.computeStorePath(*store) - : (*inputNode)->lockedRef.input.fetchToStore(fetchSettings, *store).first; + : std::get( + (*inputNode)->lockedRef.input.fetchToStore(fetchSettings, *store)); sources.insert(*storePath); } if (json) { @@ -1138,10 +838,13 @@ struct CmdFlakeArchive : FlakeCommand, MixJSON, MixDryRun, MixNoCheckSigs } }; -struct CmdFlakeShow : FlakeCommand, MixJSON +struct CmdFlakeShow : FlakeCommand, MixJSON, MixFlakeSchemas { bool showLegacy = false; bool showAllSystems = false; + bool showOutputPaths = false; + bool showDrvPaths = false; + bool showDrvNames = false; CmdFlakeShow() { @@ -1155,6 +858,21 @@ struct CmdFlakeShow : FlakeCommand, MixJSON .description = "Show the contents of outputs for all systems.", .handler = {&showAllSystems, true}, }); + addFlag({ + .longName = "output-paths", + .description = "Include the store paths of derivation outputs in the JSON output.", + .handler = {&showOutputPaths, true}, + }); + addFlag({ + .longName = "drv-paths", + .description = "Include the store paths of derivations in the JSON output.", + .handler = 
{&showDrvPaths, true}, + }); + addFlag({ + .longName = "drv-names", + .description = "Show the names and versions of derivations.", + .handler = {&showDrvNames, true}, + }); } std::string description() override @@ -1171,299 +889,180 @@ struct CmdFlakeShow : FlakeCommand, MixJSON void run(nix::ref store) override { - evalSettings.enableImportFromDerivation.setDefault(false); + if (showOutputPaths && !json) + throw UsageError("The '--output-paths' flag requires '--json'."); + + if (showDrvPaths && !json) + throw UsageError("The '--drv-paths' flag requires '--json'."); auto state = getEvalState(); auto flake = make_ref(lockFlake()); auto localSystem = std::string(settings.thisSystem.get()); - std::function - hasContent; - - // For frameworks it's important that structures are as lazy as possible - // to prevent infinite recursions, performance issues and errors that - // aren't related to the thing to evaluate. As a consequence, they have - // to emit more attributes than strictly (sic) necessary. - // However, these attributes with empty values are not useful to the user - // so we omit them. 
- hasContent = [&](eval_cache::AttrCursor & visitor, const AttrPath & attrPath, const Symbol & attr) -> bool { - auto attrPath2(attrPath); - attrPath2.push_back(attr); - auto attrPathS = attrPath2.resolve(*state); - const auto & attrName = state->symbols[attr]; - - auto visitor2 = visitor.getAttr(attrName); - - try { - if ((attrPathS[0] == "apps" || attrPathS[0] == "checks" || attrPathS[0] == "devShells" - || attrPathS[0] == "legacyPackages" || attrPathS[0] == "packages") - && (attrPathS.size() == 1 || attrPathS.size() == 2)) { - for (const auto & subAttr : visitor2->getAttrs()) { - if (hasContent(*visitor2, attrPath2, subAttr)) { - return true; - } - } - return false; - } + auto cache = flake_schemas::call(*state, flake, getDefaultFlakeSchemas()); - if ((attrPathS.size() == 1) - && (attrPathS[0] == "formatter" || attrPathS[0] == "nixosConfigurations" - || attrPathS[0] == "nixosModules" || attrPathS[0] == "overlays")) { - for (const auto & subAttr : visitor2->getAttrs()) { - if (hasContent(*visitor2, attrPath2, subAttr)) { - return true; - } - } - return false; - } + auto inventory = cache->getRoot()->getAttr("inventory"); + auto outputs = cache->getRoot()->getAttr("outputs"); - // If we don't recognize it, it's probably content - return true; - } catch (EvalError & e) { - // Some attrs may contain errors, e.g. legacyPackages of - // nixpkgs. We still want to recurse into it, instead of - // skipping it at all. 
- return true; - } - }; + FutureVector futures(*state->executor); - std::function - visit; - - visit = [&](eval_cache::AttrCursor & visitor, - const AttrPath & attrPath, - const std::string & headerPrefix, - const std::string & nextPrefix) -> nlohmann::json { - auto j = nlohmann::json::object(); - - auto attrPathS = attrPath.resolve(*state); - - Activity act(*logger, lvlInfo, actUnknown, fmt("evaluating '%s'", attrPath.to_string(*state))); - - try { - auto recurse = [&]() { - if (!json) - logger->cout("%s", headerPrefix); - std::vector attrs; - for (const auto & attr : visitor.getAttrs()) { - if (hasContent(visitor, attrPath, attr)) - attrs.push_back(attr); - } + std::function node, nlohmann::json & obj)> visit; - for (const auto & [i, attr] : enumerate(attrs)) { - const auto & attrName = state->symbols[attr]; - bool last = i + 1 == attrs.size(); - auto visitor2 = visitor.getAttr(attrName); - auto attrPath2(attrPath); - attrPath2.push_back(attr); - auto j2 = visit( - *visitor2, - attrPath2, - fmt(ANSI_GREEN "%s%s" ANSI_NORMAL ANSI_BOLD "%s" ANSI_NORMAL, - nextPrefix, - last ? treeLast : treeConn, - attrName), - nextPrefix + (last ? treeNull : treeLine)); - if (json) - j.emplace(attrName, std::move(j2)); - } - }; + visit = [&](ref node, nlohmann::json & obj) { + flake_schemas::visit( + showAllSystems ? std::optional() : localSystem, + showLegacy, + node, + flake->flake.provenance, - auto showDerivation = [&]() { - auto name = visitor.getAttr(state->s.name)->getString(); + [&](const flake_schemas::Leaf & leaf) { + if (auto what = leaf.what()) + obj.emplace("what", *what); - if (json) { - std::optional description; - if (auto aMeta = visitor.maybeGetAttr(state->s.meta)) { - if (auto aDescription = aMeta->maybeGetAttr(state->s.description)) - description = aDescription->getString(); - } - j.emplace("type", "derivation"); - j.emplace("name", name); - j.emplace("description", description ? 
*description : ""); - } else { - logger->cout( - "%s: %s '%s'", - headerPrefix, - attrPath.size() == 2 && attrPathS[0] == "devShell" ? "development environment" - : attrPath.size() >= 2 && attrPathS[0] == "devShells" ? "development environment" - : attrPath.size() == 3 && attrPathS[0] == "checks" ? "derivation" - : attrPath.size() >= 1 && attrPathS[0] == "hydraJobs" ? "derivation" - : "package", - name); - } - }; + if (auto shortDescription = leaf.shortDescription()) + obj.emplace("shortDescription", *shortDescription); - if (attrPath.size() == 0 - || (attrPath.size() == 1 - && (attrPathS[0] == "defaultPackage" || attrPathS[0] == "devShell" - || attrPathS[0] == "formatter" || attrPathS[0] == "nixosConfigurations" - || attrPathS[0] == "nixosModules" || attrPathS[0] == "defaultApp" - || attrPathS[0] == "templates" || attrPathS[0] == "overlays")) - || ((attrPath.size() == 1 || attrPath.size() == 2) - && (attrPathS[0] == "checks" || attrPathS[0] == "packages" || attrPathS[0] == "devShells" - || attrPathS[0] == "apps"))) { - recurse(); - } + if (auto drv = leaf.derivation(outputs)) { + auto drvObj = nlohmann::json::object(); + + if (json || showDrvNames) + drvObj.emplace("name", drv->getAttr(state->s.name)->getString()); - else if ( - (attrPath.size() == 2 - && (attrPathS[0] == "defaultPackage" || attrPathS[0] == "devShell" || attrPathS[0] == "formatter")) - || (attrPath.size() == 3 - && (attrPathS[0] == "checks" || attrPathS[0] == "packages" || attrPathS[0] == "devShells"))) { - if (!showAllSystems && std::string(attrPathS[1]) != localSystem) { - if (!json) - logger->cout( - fmt("%s " ANSI_WARNING "omitted" ANSI_NORMAL " (use '--all-systems' to show)", - headerPrefix)); - else { - logger->warn(fmt("%s omitted (use '--all-systems' to show)", attrPath.to_string(*state))); + if (showDrvPaths) { + auto drvPath = drv->forceDerivation(); + drvObj.emplace("path", store->printStorePath(drvPath)); } - } else { - try { - if (visitor.isDerivation()) - showDerivation(); - else { - 
auto name = visitor.getAttrPathStr(state->s.name); - logger->warn(fmt("%s is not a derivation", name)); - } - } catch (IFDError & e) { - if (!json) { - logger->cout( - fmt("%s " ANSI_WARNING "omitted due to use of import from derivation" ANSI_NORMAL, - headerPrefix)); - } else { - logger->warn( - fmt("%s omitted due to use of import from derivation", attrPath.to_string(*state))); + + if (showOutputPaths) { + auto outputs = nlohmann::json::object(); + auto drvPath = drv->forceDerivation(); + auto drv = getEvalStore()->derivationFromPath(drvPath); + for (auto & i : drv.outputsAndOptPaths(*store)) { + if (auto outPath = i.second.second) + outputs.emplace(i.first, store->printStorePath(*outPath)); + else + outputs.emplace(i.first, nullptr); } + drvObj.emplace("outputs", std::move(outputs)); } - } - } - else if (attrPath.size() > 0 && attrPathS[0] == "hydraJobs") { - try { - if (visitor.isDerivation()) - showDerivation(); - else - recurse(); - } catch (IFDError & e) { - if (!json) { - logger->cout( - fmt("%s " ANSI_WARNING "omitted due to use of import from derivation" ANSI_NORMAL, - headerPrefix)); - } else { - logger->warn( - fmt("%s omitted due to use of import from derivation", attrPath.to_string(*state))); - } + obj.emplace("derivation", std::move(drvObj)); } - } - else if (attrPath.size() > 0 && attrPathS[0] == "legacyPackages") { - if (attrPath.size() == 1) - recurse(); - else if (!showLegacy) { - if (!json) - logger->cout(fmt( - "%s " ANSI_WARNING "omitted" ANSI_NORMAL " (use '--legacy' to show)", headerPrefix)); - else { - logger->warn(fmt("%s omitted (use '--legacy' to show)", attrPath.to_string(*state))); - } - } else if (!showAllSystems && std::string(attrPathS[1]) != localSystem) { - if (!json) - logger->cout( - fmt("%s " ANSI_WARNING "omitted" ANSI_NORMAL " (use '--all-systems' to show)", - headerPrefix)); - else { - logger->warn(fmt("%s omitted (use '--all-systems' to show)", attrPath.to_string(*state))); - } - } else { - try { - if 
(visitor.isDerivation()) - showDerivation(); - else if (attrPath.size() <= 2) - // FIXME: handle recurseIntoAttrs - recurse(); - } catch (IFDError & e) { - if (!json) { - logger->cout( - fmt("%s " ANSI_WARNING "omitted due to use of import from derivation" ANSI_NORMAL, - headerPrefix)); - } else { - logger->warn( - fmt("%s omitted due to use of import from derivation", attrPath.to_string(*state))); + if (auto forSystems = leaf.forSystems()) + obj.emplace("forSystems", *forSystems); + }, + + [&](std::function forEachChild) { + auto children = nlohmann::json::object(); + forEachChild([&](Symbol attrName, ref node, bool isLast) { + auto & j = children.emplace(state->symbols[attrName], nlohmann::json::object()).first.value(); + state->spawn(futures, 1, [&visit, &j, node]() { + try { + visit(node, j); + } catch (EvalError & e) { + // FIXME: make it a flake schema attribute whether to ignore evaluation errors. + if (node->root->state.symbols[node->getAttrPath()[0]] == "legacyPackages") + j.emplace("failed", true); + else + throw; } - } - } - } + }); + }); + obj.emplace("children", std::move(children)); + }, - else if ( - (attrPath.size() == 2 && attrPathS[0] == "defaultApp") - || (attrPath.size() == 3 && attrPathS[0] == "apps")) { - auto aType = visitor.maybeGetAttr("type"); - std::optional description; - if (auto aMeta = visitor.maybeGetAttr(state->s.meta)) { - if (auto aDescription = aMeta->maybeGetAttr(state->s.description)) - description = aDescription->getString(); - } - if (!aType || aType->getString() != "app") - state->error("not an app definition").debugThrow(); - if (json) { - j.emplace("type", "app"); - if (description) - j.emplace("description", *description); - } else { - logger->cout( - "%s: app: " ANSI_BOLD "%s" ANSI_NORMAL, - headerPrefix, - description ? 
*description : "no description"); - } - } + [&](ref node, const std::vector & systems) { + obj.emplace("filtered", true); + }, - else if ( - (attrPath.size() == 1 && attrPathS[0] == "defaultTemplate") - || (attrPath.size() == 2 && attrPathS[0] == "templates")) { - auto description = visitor.getAttr("description")->getString(); - if (json) { - j.emplace("type", "template"); - j.emplace("description", description); - } else { - logger->cout("%s: template: " ANSI_BOLD "%s" ANSI_NORMAL, headerPrefix, description); - } - } + [&](ref node) { obj.emplace("isLegacy", true); }); + }; - else { - auto [type, description] = (attrPath.size() == 1 && attrPathS[0] == "overlay") - || (attrPath.size() == 2 && attrPathS[0] == "overlays") - ? std::make_pair("nixpkgs-overlay", "Nixpkgs overlay") - : attrPath.size() == 2 && attrPathS[0] == "nixosConfigurations" - ? std::make_pair("nixos-configuration", "NixOS configuration") - : (attrPath.size() == 1 && attrPathS[0] == "nixosModule") - || (attrPath.size() == 2 && attrPathS[0] == "nixosModules") - ? std::make_pair("nixos-module", "NixOS module") - : std::make_pair("unknown", "unknown"); - if (json) { - j.emplace("type", type); - } else { - logger->cout("%s: " ANSI_WARNING "%s" ANSI_NORMAL, headerPrefix, description); - } + auto inv = nlohmann::json::object(); + + flake_schemas::forEachOutput( + inventory, + [&](Symbol outputName, + std::shared_ptr output, + const std::string & doc, + bool isLast) { + auto & j = inv.emplace(state->symbols[outputName], nlohmann::json::object()).first.value(); + + if (output) { + j.emplace("doc", doc); + auto & j2 = j.emplace("output", nlohmann::json::object()).first.value(); + state->spawn(futures, 1, [&visit, output, &j2]() { visit(ref(output), j2); }); + } else + j.emplace("unknown", true); + }); + + futures.finishAll(); + + if (json) { + auto res = nlohmann::json{{"version", 2}, {"inventory", std::move(inv)}}; + printJSON(res); + } else { + + // Render the JSON into a tree representation. 
+ std::function + render; + + render = [&](nlohmann::json j, const std::string & headerPrefix, const std::string & nextPrefix) { + auto what = j.find("what"); + auto filtered = j.find("filtered"); + auto isLegacy = j.find("isLegacy"); + auto derivation = j.find("derivation"); + + auto s = headerPrefix; + + if (what != j.end()) + s += fmt(": %s", (std::string) *what); + + if (derivation != j.end()) { + auto name = derivation->find("name"); + if (name != derivation->end()) + s += fmt(ANSI_ITALIC " [%s]" ANSI_NORMAL, (std::string) *name); } - } catch (EvalError & e) { - if (!(attrPath.size() > 0 && attrPathS[0] == "legacyPackages")) - throw; - } - return j; - }; + if (filtered != j.end() && (bool) *filtered) + s += " " ANSI_WARNING "omitted" ANSI_NORMAL " (use '--all-systems' to show)"; - auto cache = openEvalCache(*state, ref(flake)); + if (isLegacy != j.end() && (bool) *isLegacy) + s += " " ANSI_WARNING "omitted" ANSI_NORMAL " (use '--legacy' to show)"; - auto j = visit(*cache->getRoot(), {}, fmt(ANSI_BOLD "%s" ANSI_NORMAL, flake->flake.lockedRef), ""); - if (json) - printJSON(j); + logger->cout(s); + + auto children = j.find("children"); + + if (children != j.end()) { + for (const auto & [i, child] : enumerate(children->items())) { + bool last = i + 1 == children->size(); + render( + child.value(), + fmt(ANSI_GREEN "%s%s" ANSI_NORMAL ANSI_BOLD "%s" ANSI_NORMAL, + nextPrefix, + last ? treeLast : treeConn, + child.key()), + nextPrefix + (last ? treeNull : treeLine)); + } + } + }; + + logger->cout("%s", fmt(ANSI_BOLD "%s" ANSI_NORMAL, flake->flake.lockedRef)); + + for (const auto & [i, child] : enumerate(inv.items())) { + bool last = i + 1 == inv.size(); + auto nextPrefix = last ? treeNull : treeLine; + auto output = child.value().find("output"); + auto headerPrefix = fmt( + ANSI_GREEN "%s" ANSI_NORMAL ANSI_BOLD "%s" ANSI_NORMAL, last ? 
treeLast : treeConn, child.key()); + if (output != child.value().end()) + render(*output, headerPrefix, nextPrefix); + else if (child.value().contains("unknown")) + logger->cout(headerPrefix + ANSI_WARNING " unknown flake output" ANSI_NORMAL); + } + } } }; @@ -1499,9 +1098,7 @@ struct CmdFlakePrefetch : FlakeCommand, MixJSON { auto originalRef = getFlakeRef(); auto resolvedRef = originalRef.resolve(fetchSettings, *store); - auto [accessor, lockedRef] = resolvedRef.lazyFetch(getEvalState()->fetchSettings, *store); - auto storePath = - fetchToStore(getEvalState()->fetchSettings, *store, accessor, FetchMode::Copy, lockedRef.input.getName()); + auto [storePath, accessor, lockedRef] = resolvedRef.input.fetchToStore(fetchSettings, *store); auto hash = store->queryPathInfo(storePath)->narHash; if (json) { @@ -1510,7 +1107,6 @@ struct CmdFlakePrefetch : FlakeCommand, MixJSON res["hash"] = hash.to_string(HashFormat::SRI, true); res["original"] = fetchers::attrsToJSON(resolvedRef.toAttrs()); res["locked"] = fetchers::attrsToJSON(lockedRef.toAttrs()); - res["locked"].erase("__final"); // internal for now printJSON(res); } else { notice( @@ -1547,12 +1143,6 @@ struct CmdFlake : NixMultiCommand #include "flake.md" ; } - - void run() override - { - experimentalFeatureSettings.require(Xp::Flakes); - NixMultiCommand::run(); - } }; static auto rCmdFlake = registerCommand("flake"); diff --git a/src/nix/formatter.cc b/src/nix/formatter.cc index 08f0b5f053ea..8921fcfd8503 100644 --- a/src/nix/formatter.cc +++ b/src/nix/formatter.cc @@ -34,14 +34,9 @@ static auto rCmdFormatter = registerCommand("formatter"); /** Common implementation bits for the `nix formatter` subcommands. */ struct MixFormatter : SourceExprCommand { - Strings getDefaultFlakeAttrPaths() override + StringSet getRoles() override { - return Strings{"formatter." 
+ settings.thisSystem.get()}; - } - - Strings getDefaultFlakeAttrPathPrefixes() override - { - return Strings{}; + return {"nix-fmt"}; } }; diff --git a/src/nix/get-env.sh b/src/nix/get-env.sh index 39fa6f9ac8f5..5b6162b4ba26 100644 --- a/src/nix/get-env.sh +++ b/src/nix/get-env.sh @@ -17,6 +17,7 @@ __functions="$(declare -F)" __dumpEnv() { printf '{\n' + printf ' "version": 1,\n' printf ' "bashFunctions": {\n' local __first=1 diff --git a/src/nix/ls.cc b/src/nix/ls.cc index a9d082fb3782..7c1627446787 100644 --- a/src/nix/ls.cc +++ b/src/nix/ls.cc @@ -4,12 +4,13 @@ #include "nix/main/common-args.hh" #include +#include "ls.hh" + using namespace nix; -struct MixLs : virtual Args, MixJSON +struct MixLs : virtual Args, MixJSON, MixLongListing { bool recursive = false; - bool verbose = false; bool showDirectory = false; MixLs() @@ -21,13 +22,6 @@ struct MixLs : virtual Args, MixJSON .handler = {&recursive, true}, }); - addFlag({ - .longName = "long", - .shortName = 'l', - .description = "Show detailed file information.", - .handler = {&verbose, true}, - }); - addFlag({ .longName = "directory", .shortName = 'd', @@ -41,13 +35,13 @@ struct MixLs : virtual Args, MixJSON std::function doPath; auto showFile = [&](const CanonPath & curPath, std::string_view relPath) { - if (verbose) { + if (longListing) { auto st = accessor->lstat(curPath); std::string tp = st.type == SourceAccessor::Type::tRegular ? (st.isExecutable ? "-r-xr-xr-x" : "-r--r--r--") : st.type == SourceAccessor::Type::tSymlink ? 
"lrwxrwxrwx" : "dr-xr-xr-x"; - auto line = fmt("%s %20d %s", tp, st.fileSize.value_or(0), relPath); + auto line = fmt("%s %9d %s", tp, st.fileSize.value_or(0), relPath); if (st.type == SourceAccessor::Type::tSymlink) line += " -> " + accessor->readLink(curPath); logger->cout(line); diff --git a/src/nix/ls.hh b/src/nix/ls.hh new file mode 100644 index 000000000000..36e61162035f --- /dev/null +++ b/src/nix/ls.hh @@ -0,0 +1,22 @@ +#pragma once + +#include "nix/util/args.hh" + +namespace nix { + +struct MixLongListing : virtual Args +{ + bool longListing = false; + + MixLongListing() + { + addFlag({ + .longName = "long", + .shortName = 'l', + .description = "Show detailed file information.", + .handler = {&longListing, true}, + }); + } +}; + +} // namespace nix diff --git a/src/nix/main.cc b/src/nix/main.cc index 2a790e7be934..2356bc4a487c 100644 --- a/src/nix/main.cc +++ b/src/nix/main.cc @@ -30,6 +30,9 @@ #include #include +#if HAVE_SENTRY +# include +#endif #ifndef _WIN32 # include @@ -87,6 +90,22 @@ static bool haveInternet() #endif } +static void disableNet() +{ + // FIXME: should check for command line overrides only. + if (!settings.getWorkerSettings().useSubstitutes.overridden) + // FIXME: should not disable local substituters (like file:///). 
+ settings.getWorkerSettings().useSubstitutes = false; + if (!fetchSettings.tarballTtl.overridden) + fetchSettings.tarballTtl = std::numeric_limits::max(); + if (!settings.getNarInfoDiskCacheSettings().ttlMeta.overridden) + settings.getNarInfoDiskCacheSettings().ttlMeta = std::numeric_limits::max(); + if (!fileTransferSettings.tries.overridden) + fileTransferSettings.tries = 0; + if (!fileTransferSettings.connectTimeout.overridden) + fileTransferSettings.connectTimeout = 1; +} + std::string programPath; struct NixArgs : virtual MultiCommand, virtual MixCommonArgs, virtual RootArgs @@ -103,6 +122,7 @@ struct NixArgs : virtual MultiCommand, virtual MixCommonArgs, virtual RootArgs categories.clear(); categories[catHelp] = "Help commands"; categories[Command::catDefault] = "Main commands"; + categories[Command::catUndocumented] = "Undocumented commands"; categories[catSecondary] = "Infrequently used commands"; categories[catUtility] = "Utility/scripting commands"; categories[catNixInstallation] = "Commands for upgrading or troubleshooting your Nix installation"; @@ -120,7 +140,6 @@ struct NixArgs : virtual MultiCommand, virtual MixCommonArgs, virtual RootArgs .description = "Print full build logs on standard error.", .category = loggingCategory, .handler = {[&]() { logger->setPrintBuildLogs(true); }}, - .experimentalFeature = Xp::NixCommand, }); addFlag({ @@ -136,7 +155,6 @@ struct NixArgs : virtual MultiCommand, virtual MixCommonArgs, virtual RootArgs .description = "Disable substituters and consider all previously downloaded files up-to-date.", .category = miscCategory, .handler = {[&]() { useNet = false; }}, - .experimentalFeature = Xp::NixCommand, }); addFlag({ @@ -144,7 +162,6 @@ struct NixArgs : virtual MultiCommand, virtual MixCommonArgs, virtual RootArgs .description = "Consider all previously downloaded files out-of-date.", .category = miscCategory, .handler = {[&]() { refresh = true; }}, - .experimentalFeature = Xp::NixCommand, }); aliases = { @@ -370,10 
+387,8 @@ void mainWrapped(int argc, char ** argv) { savedArgv = argv; - registerCrashHandler(); - /* The chroot helper needs to be run before any threads have been - started. */ + started (including Sentry's worker thread). */ #ifndef _WIN32 if (argc > 0 && argv[0] == chrootHelperName) { chrootHelper(argc, argv); @@ -381,6 +396,43 @@ void mainWrapped(int argc, char ** argv) } #endif + bool sentryEnabled = false; + +#if HAVE_SENTRY + auto sentryEndpoint = getEnv("NIX_SENTRY_ENDPOINT"); + + if (!sentryEndpoint && getEnv("DETSYS_IDS_TELEMETRY") != "disabled") { + try { + auto p = nixConfDir() / "sentry-endpoint"; + if (pathExists(p)) + sentryEndpoint = trim(readFile(p)); + } catch (...) { + ignoreExceptionExceptInterrupt(); + } + } + + if (sentryEndpoint && sentryEndpoint != "") { + sentry_options_t * options = sentry_options_new(); + sentry_options_set_dsn(options, sentryEndpoint->c_str()); + sentry_options_set_database_path(options, (getCacheDir() / "sentry").string().c_str()); + sentry_options_set_release(options, fmt("nix@%s", determinateNixVersion).c_str()); + sentry_options_set_traces_sample_rate(options, 0); + sentry_options_set_auto_session_tracking(options, false); + sentry_options_set_handler_path(options, CRASHPAD_HANDLER_PATH); + sentry_init(options); + sentry_set_tag("nix_command", argc > 0 ? 
std::string(baseNameOf(argv[0])).c_str() : ""); + sentryEnabled = true; + } + + Finally cleanupSentry([&]() { + if (sentryEnabled) + sentry_shutdown(); + }); +#endif + + if (!sentryEnabled) + registerCrashHandler(); + /* Set the build hook location For builds we perform a self-invocation, so Nix has to be @@ -448,7 +500,6 @@ void mainWrapped(int argc, char ** argv) if (argc == 2 && std::string(argv[1]) == "__dump-language") { experimentalFeatureSettings.experimentalFeatures = { - Xp::Flakes, Xp::FetchClosure, Xp::DynamicDerivations, Xp::FetchTree, @@ -512,6 +563,12 @@ void mainWrapped(int argc, char ** argv) } }); + if (getEnv("NIX_GET_COMPLETIONS")) + /* Avoid fetching stuff during tab completion. We have to this + early because we haven't checked `haveInternet()` yet + (below). */ + disableNet(); + try { auto isNixCommand = programName.ends_with("nix"); auto allowShebang = isNixCommand && argc > 1; @@ -523,16 +580,19 @@ void mainWrapped(int argc, char ** argv) applyJSONLogger(); + printTalkative("Nix %s", version()); + + std::vector subcommand; + MultiCommand * command = &args; + while (command) { + if (command && command->command) { + subcommand.push_back(command->command->first); + command = dynamic_cast(&*command->command->second); + } else + break; + } + if (args.helpRequested) { - std::vector subcommand; - MultiCommand * command = &args; - while (command) { - if (command && command->command) { - subcommand.push_back(command->command->first); - command = dynamic_cast(&*command->command->second); - } else - break; - } showHelp(subcommand, args); return; } @@ -555,20 +615,8 @@ void mainWrapped(int argc, char ** argv) args.useNet = false; } - if (!args.useNet) { - // FIXME: should check for command line overrides only. 
- if (!settings.getWorkerSettings().useSubstitutes.overridden) - settings.getWorkerSettings().useSubstitutes = false; - if (!fetchSettings.tarballTtl.overridden) - fetchSettings.tarballTtl = std::numeric_limits::max(); - if (!fileTransferSettings.tries.overridden) - fileTransferSettings.tries = 0; - if (!fileTransferSettings.connectTimeout.overridden) - fileTransferSettings.connectTimeout = 1; - auto & ttlMeta = settings.getNarInfoDiskCacheSettings().ttlMeta; - if (!ttlMeta.overridden) - ttlMeta = std::numeric_limits::max(); - } + if (!args.useNet) + disableNet(); if (args.refresh) { fetchSettings.tarballTtl = 0; @@ -581,6 +629,11 @@ void mainWrapped(int argc, char ** argv) evalSettings.pureEval = false; } +#if HAVE_SENTRY + if (sentryEnabled) + sentry_set_tag("nix_subcommand", concatStringsSep(" ", subcommand).c_str()); +#endif + try { args.command->second->run(); } catch (eval_cache::CachedEvalError & e) { @@ -595,15 +648,15 @@ void mainWrapped(int argc, char ** argv) int main(int argc, char ** argv) { + using namespace nix; + // The CLI has a more detailed version than the libraries; see nixVersion. - nix::nixVersion = NIX_CLI_VERSION; + nixVersion = NIX_CLI_VERSION; #ifndef _WIN32 // Increase the default stack size for the evaluator and for // libstdc++'s std::regex. - // This used to be 64 MiB, but macOS as deployed on GitHub Actions has a - // hard limit slightly under that, so we round it down a bit. - nix::setStackSize(60 * 1024 * 1024); + setStackSize(evalStackSize); #endif - return nix::handleExceptions(argv[0], [&]() { nix::mainWrapped(argc, argv); }); + return handleExceptions(argv[0], [&]() { mainWrapped(argc, argv); }); } diff --git a/src/nix/meson.build b/src/nix/meson.build index 039b202b937d..f5602e730013 100644 --- a/src/nix/meson.build +++ b/src/nix/meson.build @@ -50,6 +50,17 @@ mandir = get_option('mandir') mandir = fs.is_absolute(mandir) ? 
mandir : prefix / mandir configdata.set_quoted('NIX_MAN_DIR', mandir) +sentry_required = get_option('sentry') +configdata.set('HAVE_SENTRY', sentry_required.enabled().to_int()) +if sentry_required.enabled() + crashpad_handler_path = get_option('crashpad-handler') + assert( + crashpad_handler_path != '', + 'crashpad-handler path must be set when sentry is enabled', + ) + configdata.set_quoted('CRASHPAD_HANDLER_PATH', crashpad_handler_path) +endif + config_priv_h = configure_file( configuration : configdata, output : 'cli-config-private.hh', @@ -68,6 +79,7 @@ nix_sources = [ config_priv_h ] + files( 'config.cc', 'copy.cc', 'crash-handler.cc', + 'crash.cc', 'derivation-add.cc', 'derivation-show.cc', 'derivation.cc', @@ -78,6 +90,7 @@ nix_sources = [ config_priv_h ] + files( 'env.cc', 'eval.cc', 'flake-prefetch-inputs.cc', + 'flake-prefetch-inputs.cc', 'flake.cc', 'formatter.cc', 'hash.cc', @@ -87,11 +100,14 @@ nix_sources = [ config_priv_h ] + files( 'make-content-addressed.cc', 'man-pages.cc', 'nar.cc', + 'nario.cc', 'optimise-store.cc', 'path-from-hash-part.cc', 'path-info.cc', 'prefetch.cc', 'profile.cc', + 'provenance.cc', + 'ps.cc', 'realisation.cc', 'registry.cc', 'repl.cc', @@ -187,7 +203,7 @@ this_exe = executable( sources, dependencies : deps_private_subproject + deps_private + deps_other, include_directories : include_dirs, - link_args : linker_export_flags, + link_args : linker_export_flags + (sentry_required.enabled() ? [ '-lsentry' ] : []), install : true, cpp_pch : do_pch ? 
[ 'pch/precompiled-headers.hh' ] : [], ) diff --git a/src/nix/meson.options b/src/nix/meson.options index 0fc680cfe4c7..61da4fad7eed 100644 --- a/src/nix/meson.options +++ b/src/nix/meson.options @@ -7,3 +7,17 @@ option( value : 'etc/profile.d', description : 'the path to install shell profile files', ) + +option( + 'sentry', + type : 'feature', + value : 'auto', + description : 'Enable Sentry crash reporting', +) + +option( + 'crashpad-handler', + type : 'string', + value : '', + description : 'Path to the crashpad_handler binary (required when sentry is enabled)', +) diff --git a/src/nix/nario-export.md b/src/nix/nario-export.md new file mode 100644 index 000000000000..2480733c1cae --- /dev/null +++ b/src/nix/nario-export.md @@ -0,0 +1,29 @@ +R""( + +# Examples + +* Export the closure of the build of `nixpkgs#hello`: + + ```console + # nix nario export --format 2 -r nixpkgs#hello > dump.nario + ``` + + It can be imported into another store: + + ```console + # nix nario import --no-check-sigs < dump.nario + ``` + +# Description + +This command prints to standard output a serialization of the specified store paths in `nario` format. This serialization can be imported into another store using `nix nario import`. + +References of a path are not exported by default; use `-r` to export a complete closure. +Paths are exported in topologically sorted order (i.e. if path `X` refers to `Y`, then `Y` appears before `X`). +You must specify the desired `nario` version. Currently the following versions are supported: + +* `1`: This version is compatible with the legacy `nix-store --export` and `nix-store --import` commands. It should be avoided because it is not memory-efficient on import. It does not support signatures, so you have to use `--no-check-sigs` on import. + +* `2`: The latest version. Recommended. 
+ +)"" diff --git a/src/nix/nario-import.md b/src/nix/nario-import.md new file mode 100644 index 000000000000..9cba60c62203 --- /dev/null +++ b/src/nix/nario-import.md @@ -0,0 +1,15 @@ +R""( + +# Examples + +* Import store paths from the file named `dump`: + + ```console + # nix nario import < dump.nario + ``` + +# Description + +This command reads from standard input a serialization of store paths produced by `nix nario export` and adds them to the Nix store. + +)"" diff --git a/src/nix/nario-list.md b/src/nix/nario-list.md new file mode 100644 index 000000000000..c050457b3657 --- /dev/null +++ b/src/nix/nario-list.md @@ -0,0 +1,43 @@ +R""( + +# Examples + +* List the contents of a nario file: + + ```console + # nix nario list < dump.nario + /nix/store/f671jqvjcz37fsprzqn5jjsmyjj69p9b-xgcc-14.2.1.20250322-libgcc: 201856 bytes + /nix/store/n7iwblclbrz20xinvy4cxrvippdhvqll-libunistring-1.3: 2070240 bytes + … + ``` + +* Use `--json` to get detailed information in JSON format: + + ```console + # nix nario list --json < dump.nario + { + "paths": { + "/nix/store/m1r53pnn…-hello-2.12.1": { + "ca": null, + "deriver": "/nix/store/qa8is0vm…-hello-2.12.1.drv", + "narHash": "sha256-KSCYs4J7tFa+oX7W5M4D7ZYNvrWtdcWTdTL5fQk+za8=", + "narSize": 234672, + "references": [ + "/nix/store/g8zyryr9…-glibc-2.40-66", + "/nix/store/m1r53pnn…-hello-2.12.1" + ], + "registrationTime": 1756900709, + "signatures": [ "cache.nixos.org-1:QbG7A…" ], + "ultimate": false + }, + … + }, + "version": 1 + } + ``` + +# Description + +This command lists the contents of a nario file read from standard input. 
+ +)"" diff --git a/src/nix/nario.cc b/src/nix/nario.cc new file mode 100644 index 000000000000..9e83e6e0709f --- /dev/null +++ b/src/nix/nario.cc @@ -0,0 +1,346 @@ +#include "nix/cmd/command.hh" +#include "nix/main/shared.hh" +#include "nix/store/store-api.hh" +#include "nix/store/export-import.hh" +#include "nix/util/callback.hh" +#include "nix/util/fs-sink.hh" +#include "nix/util/archive.hh" + +#include "ls.hh" + +#include + +using namespace nix; + +struct CmdNario : NixMultiCommand +{ + CmdNario() + : NixMultiCommand("nario", RegisterCommand::getCommandsFor({"nario"})) + { + } + + std::string description() override + { + return "operations for manipulating nario files"; + } + + Category category() override + { + return catUtility; + } +}; + +static auto rCmdNario = registerCommand("nario"); + +struct CmdNarioExport : StorePathsCommand +{ + unsigned int version = 0; + + CmdNarioExport() + { + addFlag({ + .longName = "format", + .description = "Version of the nario format to use. Must be `1` or `2`.", + .labels = {"nario-format"}, + .handler = {&version}, + .required = true, + }); + } + + std::string description() override + { + return "serialize store paths to standard output in nario format"; + } + + std::string doc() override + { + return +#include "nario-export.md" + ; + } + + void run(ref store, StorePaths && storePaths) override + { + auto fd = getStandardOutput(); + if (isatty(fd)) + throw UsageError("refusing to write nario to a terminal"); + FdSink sink(std::move(fd)); + exportPaths(*store, StorePathSet(storePaths.begin(), storePaths.end()), sink, version); + } +}; + +static auto rCmdNarioExport = registerCommand2({"nario", "export"}); + +static FdSource getNarioSource() +{ + auto fd = getStandardInput(); + if (isatty(fd)) + throw UsageError("refusing to read nario from a terminal"); + return FdSource(std::move(fd)); +} + +struct CmdNarioImport : StoreCommand, MixNoCheckSigs +{ + std::string description() override + { + return "import store paths from a 
nario file on standard input"; + } + + std::string doc() override + { + return +#include "nario-import.md" + ; + } + + void run(ref store) override + { + auto source{getNarioSource()}; + importPaths(*store, source, checkSigs); + } +}; + +static auto rCmdNarioImport = registerCommand2({"nario", "import"}); + +nlohmann::json listNar(Source & source) +{ + struct : FileSystemObjectSink + { + nlohmann::json root = nlohmann::json::object(); + + nlohmann::json & makeObject(const CanonPath & path, std::string_view type) + { + auto * cur = &root; + for (auto & c : path) { + assert((*cur)["type"] == "directory"); + auto i = (*cur)["entries"].emplace(c, nlohmann::json::object()).first; + cur = &i.value(); + } + auto inserted = cur->emplace("type", type).second; + assert(inserted); + return *cur; + } + + void createDirectory(const CanonPath & path) override + { + auto & j = makeObject(path, "directory"); + j["entries"] = nlohmann::json::object(); + } + + void createRegularFile(const CanonPath & path, fun func) override + { + struct : CreateRegularFileSink + { + bool executable = false; + std::optional size; + + void operator()(std::string_view data) override {} + + void preallocateContents(uint64_t s) override + { + size = s; + } + + void isExecutable() override + { + executable = true; + } + } crf; + + crf.skipContents = true; + + func(crf); + + auto & j = makeObject(path, "regular"); + j.emplace("size", crf.size.value()); + if (crf.executable) + j.emplace("executable", true); + } + + void createSymlink(const CanonPath & path, const std::string & target) override + { + auto & j = makeObject(path, "symlink"); + j.emplace("target", target); + } + + } parseSink; + + parseDump(parseSink, source); + + return parseSink.root; +} + +void renderNarListing(const CanonPath & prefix, const nlohmann::json & root, bool longListing) +{ + std::function recurse; + recurse = [&](const nlohmann::json & json, const CanonPath & path) { + auto type = json["type"]; + + if (longListing) { + auto tp 
= type == "regular" ? (json.find("executable") != json.end() ? "-r-xr-xr-x" : "-r--r--r--") + : type == "symlink" ? "lrwxrwxrwx" + : "dr-xr-xr-x"; + auto line = fmt("%s %9d %s", tp, type == "regular" ? (uint64_t) json["size"] : 0, prefix / path); + if (type == "symlink") + line += " -> " + (std::string) json["target"]; + logger->cout(line); + } else + logger->cout(fmt("%s", prefix / path)); + + if (type == "directory") { + for (auto & entry : json["entries"].items()) { + recurse(entry.value(), path / entry.key()); + } + } + }; + + recurse(root, CanonPath::root); +} + +struct CmdNarioList : Command, MixJSON, MixLongListing +{ + bool listContents = false; + + CmdNarioList() + { + addFlag({ + .longName = "recursive", + .shortName = 'R', + .description = "List the contents of NARs inside the nario.", + .handler = {&listContents, true}, + }); + } + + std::string description() override + { + return "list the contents of a nario file"; + } + + std::string doc() override + { + return +#include "nario-list.md" + ; + } + + void run() override + { + struct Config : StoreConfig + { + Config(const Params & params) + : StoreConfig(params) + { + } + + ref openStore() const override + { + abort(); + } + }; + + struct ListingStore : Store + { + std::optional json; + CmdNarioList & cmd; + + ListingStore(ref config, CmdNarioList & cmd) + : Store{*config} + , cmd(cmd) + { + } + + void queryPathInfoUncached( + const StorePath & path, Callback> callback) noexcept override + { + callback(nullptr); + } + + std::optional isTrustedClient() override + { + return Trusted; + } + + std::optional queryPathFromHashPart(const std::string & hashPart) override + { + return std::nullopt; + } + + void + addToStore(const ValidPathInfo & info, Source & source, RepairFlag repair, CheckSigsFlag checkSigs) override + { + std::optional contents; + if (cmd.listContents) + contents = listNar(source); + else + source.skip(info.narSize); + + if (json) { + // FIXME: make the JSON format configurable. 
+ auto obj = info.toJSON(this, true, PathInfoJsonFormat::V1); + if (contents) + obj.emplace("contents", *contents); + json->emplace(printStorePath(info.path), std::move(obj)); + } else { + if (contents) + renderNarListing(CanonPath(printStorePath(info.path)), *contents, cmd.longListing); + else + logger->cout(fmt("%s: %d bytes", printStorePath(info.path), info.narSize)); + } + } + + StorePath addToStoreFromDump( + Source & dump, + std::string_view name, + FileSerialisationMethod dumpMethod, + ContentAddressMethod hashMethod, + HashAlgorithm hashAlgo, + const StorePathSet & references, + RepairFlag repair, + std::shared_ptr provenance) override + { + unsupported("addToStoreFromDump"); + } + + void narFromPath(const StorePath & path, Sink & sink) override + { + unsupported("narFromPath"); + } + + void queryRealisationUncached( + const DrvOutput &, Callback> callback) noexcept override + { + callback(nullptr); + } + + ref getFSAccessor(bool requireValidPath) override + { + return makeEmptySourceAccessor(); + } + + std::shared_ptr getFSAccessor(const StorePath & path, bool requireValidPath) override + { + unsupported("getFSAccessor"); + } + + void registerDrvOutput(const Realisation & output) override + { + unsupported("registerDrvOutput"); + } + }; + + auto source{getNarioSource()}; + auto config = make_ref(StoreConfig::Params()); + ListingStore lister(config, *this); + if (json) + lister.json = nlohmann::json::object(); + importPaths(lister, source, NoCheckSigs); + if (json) { + auto j = nlohmann::json::object(); + j["version"] = 1; + j["paths"] = std::move(*lister.json); + printJSON(j); + } + } +}; + +static auto rCmdNarioList = registerCommand2({"nario", "list"}); diff --git a/src/nix/nix-build/nix-build.cc b/src/nix/nix-build/nix-build.cc index cda68006a01d..27ac286bd968 100644 --- a/src/nix/nix-build/nix-build.cc +++ b/src/nix/nix-build/nix-build.cc @@ -458,7 +458,9 @@ static void main_nix_build(int argc, char ** argv) throw UsageError("nix-shell requires a 
single derivation"); auto & packageInfo = drvs.front(); - auto drv = evalStore->derivationFromPath(packageInfo.requireDrvPath()); + auto drvPath = packageInfo.requireDrvPath(); + state->waitForPath(drvPath); + auto drv = evalStore->derivationFromPath(drvPath); std::vector pathsToBuild; RealisedPath::Set pathsToCopy; @@ -482,6 +484,7 @@ static void main_nix_build(int argc, char ** argv) throw Error("the 'bashInteractive' attribute in did not evaluate to a derivation"); auto bashDrv = drv->requireDrvPath(); + state->waitForPath(bashDrv); pathsToBuild.push_back( DerivedPath::Built{ .drvPath = makeConstantStorePathRef(bashDrv), @@ -691,6 +694,7 @@ static void main_nix_build(int argc, char ** argv) for (auto & packageInfo : drvs) { auto drvPath = packageInfo.requireDrvPath(); + state->waitForPath(drvPath); auto outputName = packageInfo.queryOutputName(); if (outputName == "") diff --git a/src/nix/nix-channel/nix-channel.cc b/src/nix/nix-channel/nix-channel.cc index 05adc3d2cda8..e02426f13188 100644 --- a/src/nix/nix-channel/nix-channel.cc +++ b/src/nix/nix-channel/nix-channel.cc @@ -203,6 +203,11 @@ static void update(const StringSet & channelNames) static int main_nix_channel(int argc, char ** argv) { + warn( + "nix-channel is deprecated in favor of flakes in Determinate Nix. \ +See https://zero-to-nix.com for a guide to Nix flakes. 
\ +For details and to offer feedback on the deprecation process, see: https://github.com/DeterminateSystems/nix-src/issues/34."); + { // Figure out the name of the `.nix-channels' file to use auto home = getHome(); diff --git a/src/nix/nix-env/nix-env.cc b/src/nix/nix-env/nix-env.cc index a04ea7f41f06..d890a74837a9 100644 --- a/src/nix/nix-env/nix-env.cc +++ b/src/nix/nix-env/nix-env.cc @@ -496,6 +496,7 @@ static void printMissing(EvalState & state, PackageInfos & elems) std::vector targets; for (auto & i : elems) if (auto drvPath = i.queryDrvPath()) { + state.waitForPath(*drvPath); auto path = DerivedPath::Built{ .drvPath = makeConstantStorePathRef(*drvPath), .outputs = OutputsSpec::All{}, @@ -1096,7 +1097,7 @@ static void opQuery(Globals & globals, Strings opFlags, Strings opArgs) continue; /* For table output. */ - std::vector columns; + TableRow columns; /* For XML output. */ XMLAttrs attrs; diff --git a/src/nix/nix-env/user-env.cc b/src/nix/nix-env/user-env.cc index f87e7f6103a9..eab668a9206a 100644 --- a/src/nix/nix-env/user-env.cc +++ b/src/nix/nix-env/user-env.cc @@ -42,8 +42,10 @@ bool createUserEnv( exist already. */ std::vector drvsToBuild; for (auto & i : elems) - if (auto drvPath = i.queryDrvPath()) + if (auto drvPath = i.queryDrvPath()) { + state.waitForPath(*drvPath); drvsToBuild.push_back({*drvPath}); + } debug("building user environment dependencies"); state.store->buildPaths(toDerivedPaths(drvsToBuild), state.repair ? bmRepair : bmNormal); @@ -156,6 +158,7 @@ bool createUserEnv( debug("building user environment"); std::vector topLevelDrvs; topLevelDrvs.push_back({topLevelDrv}); + state.waitForPath(topLevelDrv); state.store->buildPaths(toDerivedPaths(topLevelDrvs), state.repair ? bmRepair : bmNormal); /* Switch the current user environment to the output path. 
*/ diff --git a/src/nix/nix-instantiate/nix-instantiate.cc b/src/nix/nix-instantiate/nix-instantiate.cc index 82838e2ae8ae..9861b52c9ad2 100644 --- a/src/nix/nix-instantiate/nix-instantiate.cc +++ b/src/nix/nix-instantiate/nix-instantiate.cc @@ -17,6 +17,8 @@ #include #include +#include + using namespace nix; std::filesystem::path gcRoot; @@ -56,14 +58,18 @@ void processExpr( else state.autoCallFunction(autoArgs, v, vRes); if (output == okRaw) - std::cout << *state.coerceToString(noPos, vRes, context, "while generating the nix-instantiate output"); + std::cout << state.devirtualize( + *state.coerceToString(noPos, vRes, context, "while generating the nix-instantiate output"), + context); // We intentionally don't output a newline here. The default PS1 for Bash in NixOS starts with a newline // and other interactive shells like Zsh are smart enough to print a missing newline before the prompt. - else if (output == okXML) - printValueAsXML(state, strict, location, vRes, std::cout, context, noPos); - else if (output == okJSON) { - printValueAsJSON(state, strict, vRes, v.determinePos(noPos), std::cout, context); - std::cout << std::endl; + else if (output == okXML) { + std::ostringstream s; + printValueAsXML(state, strict, location, vRes, s, context, noPos); + std::cout << state.devirtualize(s.str(), context); + } else if (output == okJSON) { + auto j = printValueAsJSON(state, strict, vRes, v.determinePos(noPos), context); + std::cout << state.devirtualize(j.dump(), context) << std::endl; } else { if (strict) state.forceValueDeep(vRes); diff --git a/src/nix/nix-store/nix-store.cc b/src/nix/nix-store/nix-store.cc index fb2018cff987..d6649d3e96dd 100644 --- a/src/nix/nix-store/nix-store.cc +++ b/src/nix/nix-store/nix-store.cc @@ -772,7 +772,7 @@ static void opExport(Strings opFlags, Strings opArgs) paths.insert(store->followLinksToStorePath(i)); FdSink sink(getStandardOutput()); - exportPaths(*store, paths, sink); + exportPaths(*store, paths, sink, 1); sink.flush(); } 
diff --git a/src/nix/nix.md b/src/nix/nix.md index fed781aa77fa..cc31dabbab41 100644 --- a/src/nix/nix.md +++ b/src/nix/nix.md @@ -48,11 +48,6 @@ manual](https://nix.dev/manual/nix/stable/). # Installables -> **Warning** \ -> Installables are part of the unstable -> [`nix-command` experimental feature](@docroot@/development/experimental-features.md#xp-feature-nix-command), -> and subject to change without notice. - Many `nix` subcommands operate on one or more *installables*. These are command line arguments that represent something that can be realised in the Nix store. @@ -72,13 +67,6 @@ That is, Nix will operate on the default flake output attribute of the flake in ### Flake output attribute -> **Warning** \ -> Flake output attribute installables depend on both the -> [`flakes`](@docroot@/development/experimental-features.md#xp-feature-flakes) -> and -> [`nix-command`](@docroot@/development/experimental-features.md#xp-feature-nix-command) -> experimental features, and subject to change without notice. 
- Example: `nixpkgs#hello` These have the form *flakeref*[`#`*attrpath*], where *flakeref* is a diff --git a/src/nix/package.nix b/src/nix/package.nix index 8195e6c6ff5a..11962c466f45 100644 --- a/src/nix/package.nix +++ b/src/nix/package.nix @@ -2,11 +2,13 @@ stdenv, lib, mkMesonExecutable, + llvmPackages, nix-store, nix-expr, nix-main, nix-cmd, + sentry-native, # Configuration Options @@ -15,10 +17,11 @@ let inherit (lib) fileset; + enableSentry = !stdenv.hostPlatform.isStatic; in mkMesonExecutable (finalAttrs: { - pname = "nix"; + pname = "determinate-nix"; inherit version; workDir = ./.; @@ -69,16 +72,38 @@ mkMesonExecutable (finalAttrs: { nix-expr nix-main nix-cmd - ]; + ] + ++ lib.optional ( + stdenv.cc.isClang + && stdenv.hostPlatform.isStatic + && stdenv.cc.libcxx != null + && stdenv.cc.libcxx.isLLVM + ) llvmPackages.libunwind + ++ lib.optional enableSentry sentry-native; mesonFlags = [ - ]; + (lib.mesonEnable "sentry" enableSentry) + ] + ++ lib.optional enableSentry ( + lib.mesonOption "crashpad-handler" "${sentry-native}/bin/crashpad_handler" + ); postInstall = lib.optionalString stdenv.hostPlatform.isStatic '' mkdir -p $out/nix-support echo "file binary-dist $out/bin/nix" >> $out/nix-support/hydra-build-products ''; + # Fixes a problem with the "nix-cli-libcxxStdenv-static" package output. + # For some reason that is not clear, it is wanting to use libgcc_eh which is not available. + # Force this to be built with compiler-rt & libunwind over libgcc_eh works. 
+ # Issue: https://github.com/NixOS/nixpkgs/issues/177129 + NIX_CFLAGS_COMPILE = lib.optionalString ( + stdenv.cc.isClang + && stdenv.hostPlatform.isStatic + && stdenv.cc.libcxx != null + && stdenv.cc.libcxx.isLLVM + ) "-rtlib=compiler-rt -unwindlib=libunwind"; + meta = { mainProgram = "nix"; platforms = lib.platforms.unix ++ lib.platforms.windows; diff --git a/src/nix/prefetch.cc b/src/nix/prefetch.cc index 3a7e448cda24..be7aacf3103a 100644 --- a/src/nix/prefetch.cc +++ b/src/nix/prefetch.cc @@ -15,6 +15,8 @@ #include "nix/util/environment-variables.hh" #include "nix/util/url.hh" #include "nix/store/path.hh" +#include "nix/util/override-provenance-source-accessor.hh" +#include "nix/fetchers/provenance.hh" #include "man-pages.hh" @@ -145,7 +147,15 @@ std::tuple prefetchFile( Activity act(*logger, lvlChatty, actUnknown, fmt("adding '%s' to the store", url.to_string())); - auto info = store->addToStoreSlow(name, makeFSSourceAccessor(tmpFile), method, hashAlgo, {}, expectedHash); + auto info = store->addToStoreSlow( + name, + {make_ref( + makeFSSourceAccessor(tmpFile), + unpack ? nullptr : std::make_shared(url.to_string()))}, + method, + hashAlgo, + {}, + expectedHash); storePath = info.path; assert(info.ca); hash = info.ca->hash; diff --git a/src/nix/profile-history.md b/src/nix/profile-history.md index f0bfe5037912..0c9a340ddf0d 100644 --- a/src/nix/profile-history.md +++ b/src/nix/profile-history.md @@ -7,7 +7,7 @@ R""( ```console # nix profile history Version 508 (2020-04-10): - flake:nixpkgs#legacyPackages.x86_64-linux.awscli: ∅ -> 1.17.13 + flake:nixpkgs#legacyPackages.x86_64-linux.awscli: 1.17.13 added Version 509 (2020-05-16) <- 508: flake:nixpkgs#legacyPackages.x86_64-linux.awscli: 1.17.13 -> 1.18.211 @@ -20,7 +20,7 @@ between subsequent versions of a profile. It only shows top-level packages, not dependencies; for that, use [`nix profile diff-closures`](./nix3-profile-diff-closures.md). 
-The addition of a package to a profile is denoted by the string `∅ ->` -*version*, whereas the removal is denoted by *version* `-> ∅`. +The addition of a package to a profile is denoted by the string +*version* `added`, whereas the removal is denoted by *version* ` removed`. )"" diff --git a/src/nix/profile.cc b/src/nix/profile.cc index 4f563199c02c..5a856e5e60ce 100644 --- a/src/nix/profile.cc +++ b/src/nix/profile.cc @@ -288,11 +288,11 @@ struct ProfileManifest while (i != prev.elements.end() || j != cur.elements.end()) { if (j != cur.elements.end() && (i == prev.elements.end() || i->first > j->first)) { - logger->cout("%s%s: ∅ -> %s", indent, j->second.identifier(), j->second.versions()); + logger->cout("%s%s: %s added", indent, j->second.identifier(), j->second.versions()); changes = true; ++j; } else if (i != prev.elements.end() && (j == cur.elements.end() || i->first < j->first)) { - logger->cout("%s%s: %s -> ∅", indent, i->second.identifier(), i->second.versions()); + logger->cout("%s%s: %s removed", indent, i->second.identifier(), i->second.versions()); changes = true; ++i; } else { @@ -313,11 +313,11 @@ struct ProfileManifest }; static std::map>> -builtPathsPerInstallable(const std::vector, BuiltPathWithResult>> & builtPaths) +builtPathsPerInstallable(const std::vector & builtPaths) { std::map>> res; - for (auto & [installable, builtPath] : builtPaths) { - auto & r = res.insert({&*installable, + for (auto & b : builtPaths) { + auto & r = res.insert({&*b.installable, { {}, make_ref(), @@ -327,6 +327,7 @@ builtPathsPerInstallable(const std::vector, BuiltPath (e.g. meta.priority fields) if the installable returned multiple derivations. So pick one arbitrarily. FIXME: print a warning? 
*/ + auto builtPath = b.getSuccess(); r.first.push_back(builtPath.path); r.second = builtPath.info; } @@ -363,8 +364,10 @@ struct CmdProfileAdd : InstallablesCommand, MixDefaultProfile { ProfileManifest manifest(*getEvalState(), *profile); - auto builtPaths = builtPathsPerInstallable( - Installable::build2(getEvalStore(), store, Realise::Outputs, installables, bmNormal)); + auto buildResults = Installable::build2(getEvalStore(), store, Realise::Outputs, installables, bmNormal); + Installable::throwBuildErrors(buildResults, *store); + + auto builtPaths = builtPathsPerInstallable(buildResults); for (auto & installable : installables) { ProfileElement element; @@ -558,10 +561,24 @@ struct AllMatcher final : public Matcher AllMatcher all; -class MixProfileElementMatchers : virtual Args, virtual StoreCommand +class MixProfileElementMatchers : virtual Args, virtual StoreCommand, public virtual MixDefaultProfile { std::vector> _matchers; + void completeProfileElements(AddCompletions & completions, std::string_view prefix) + { + auto * evalCmd = dynamic_cast(this); + if (!evalCmd) + return; + + auto evalState = evalCmd->getEvalState(); + ProfileManifest manifest(*evalState, *profile); + + for (auto & [name, element] : manifest.elements) + if (name.starts_with(prefix)) + completions.add(name, element.identifier()); + } + public: MixProfileElementMatchers() @@ -592,6 +609,9 @@ class MixProfileElementMatchers : virtual Args, virtual StoreCommand _matchers.push_back(make_ref(arg)); } } + }}, + .completer = {[this](AddCompletions & completions, size_t, std::string_view prefix) { + completeProfileElements(completions, prefix); }}}); } @@ -632,7 +652,7 @@ class MixProfileElementMatchers : virtual Args, virtual StoreCommand } }; -struct CmdProfileRemove : virtual EvalCommand, MixDefaultProfile, MixProfileElementMatchers +struct CmdProfileRemove : virtual EvalCommand, MixProfileElementMatchers { std::string description() override { @@ -672,7 +692,7 @@ struct CmdProfileRemove : 
virtual EvalCommand, MixDefaultProfile, MixProfileElem } }; -struct CmdProfileUpgrade : virtual SourceExprCommand, MixDefaultProfile, MixProfileElementMatchers +struct CmdProfileUpgrade : virtual SourceExprCommand, MixProfileElementMatchers { std::string description() override { @@ -726,11 +746,11 @@ struct CmdProfileUpgrade : virtual SourceExprCommand, MixDefaultProfile, MixProf this, getEvalState(), FlakeRef(element.source->originalRef), - "", + "." + element.source->attrPath, // absolute lookup element.source->outputs, - Strings{element.source->attrPath}, - Strings{}, - lockFlags); + StringSet{}, + lockFlags, + getDefaultFlakeSchemas()); auto derivedPaths = installable->toDerivedPaths(); if (derivedPaths.empty()) @@ -766,8 +786,10 @@ struct CmdProfileUpgrade : virtual SourceExprCommand, MixDefaultProfile, MixProf return; } - auto builtPaths = builtPathsPerInstallable( - Installable::build2(getEvalStore(), store, Realise::Outputs, installables, bmNormal)); + auto buildResults = Installable::build2(getEvalStore(), store, Realise::Outputs, installables, bmNormal); + Installable::throwBuildErrors(buildResults, *store); + + auto builtPaths = builtPathsPerInstallable(buildResults); for (size_t i = 0; i < installables.size(); ++i) { auto & installable = installables.at(i); diff --git a/src/nix/provenance-show.md b/src/nix/provenance-show.md new file mode 100644 index 000000000000..526fbd54c8ea --- /dev/null +++ b/src/nix/provenance-show.md @@ -0,0 +1,29 @@ +R""( + +# Examples + +* Show the provenance of a store path: + + ```console + # nix provenance show /run/current-system + /nix/store/k145bdxhdb89i4fkvgdisdz1yh2wiymm-nixos-system-machine-25.05.20251210.d2b1213 + ← copied from cache.flakehub.com + ← built from derivation /nix/store/w3p3xkminq61hs00kihd34w1dglpj5s9-nixos-system-machine-25.05.20251210.d2b1213.drv (output out) on build-machine for x86_64-linux + ← instantiated from flake output 
github:my-org/my-repo/6b03eb949597fe96d536e956a2c14da9901dbd21?dir=machine#nixosConfigurations.machine.config.system.build.toplevel + ``` + +# Description + +Show the provenance chain of one or more store paths. For each store path, this displays where it came from: what binary cache it was copied from, what flake it was built from, and so on. + +The provenance chain shows the history of how the store path came to exist, including: + +- **Copied**: The path was copied from another Nix store, typically a binary cache. +- **Built**: The path was built from a derivation. +- **Flake evaluation**: The derivation was instantiated during the evaluation of a flake output. +- **Fetched**: The path was obtained by fetching a source tree. +- **Meta**: Metadata associated with the derivation. + +Note: if you want provenance in JSON format, use the `provenance` field returned by `nix path-info --json`. + +)"" diff --git a/src/nix/provenance-verify.md b/src/nix/provenance-verify.md new file mode 100644 index 000000000000..e05cb95bf249 --- /dev/null +++ b/src/nix/provenance-verify.md @@ -0,0 +1,21 @@ +R""( + +# Examples + +* Verify the provenance of a store path: + + ```console + # nix provenance verify /run/current-system + ``` + +# Description + +Verify the provenance of one or more store paths. This checks whether the store paths can be rebuilt from source. Specifically, it verifies the following: + +* That source trees can be fetched. +* That flake evaluations result in the instantiation of the desired store paths (most commonly, store derivations). +* That derivations can be successfully rebuilt, producing identical outputs. + +A non-zero exit code is returned if any of the verifications fail. 
+ +)"" diff --git a/src/nix/provenance.cc b/src/nix/provenance.cc new file mode 100644 index 000000000000..be937afb961d --- /dev/null +++ b/src/nix/provenance.cc @@ -0,0 +1,582 @@ +#include "nix/cmd/command.hh" +#include "nix/store/store-api.hh" +#include "nix/store/store-open.hh" +#include "nix/expr/provenance.hh" +#include "nix/store/provenance.hh" +#include "nix/flake/provenance.hh" +#include "nix/fetchers/provenance.hh" +#include "nix/util/provenance.hh" +#include "nix/util/json-utils.hh" +#include "nix/fetchers/fetch-to-store.hh" +#include "nix/util/exit.hh" +#include "nix/cmd/installable-flake.hh" +#include "nix/store/derivations.hh" +#include "nix/store/filetransfer.hh" +#include "nix/util/callback.hh" +#include "nix/util/terminal.hh" + +#include +#include +#include +#include +#include + +#define TAB " " + +using namespace nix; + +struct CmdProvenance : NixMultiCommand +{ + CmdProvenance() + : NixMultiCommand("provenance", RegisterCommand::getCommandsFor({"provenance"})) + { + } + + std::string description() override + { + return "query and check the provenance of store paths"; + } + + std::optional experimentalFeature() override + { + return Xp::Provenance; + } + + Category category() override + { + return catSecondary; + } +}; + +static auto rCmdProvenance = registerCommand("provenance"); + +struct CmdProvenanceShow : StorePathsCommand +{ + std::string description() override + { + return "show the provenance chain of store paths"; + } + + std::string doc() override + { + return +#include "provenance-show.md" + ; + } + + void displayProvenance(Store & store, const StorePath & path, std::shared_ptr provenance) + { + while (provenance) { + if (auto copied = std::dynamic_pointer_cast(provenance)) { + logger->cout("← copied from " ANSI_BOLD "%s" ANSI_NORMAL, copied->from); + provenance = copied->next; + } + + else if (auto build = std::dynamic_pointer_cast(provenance)) { + logger->cout( + "← built from derivation " ANSI_BOLD "%s" ANSI_NORMAL " (output " 
ANSI_BOLD "%s" ANSI_NORMAL + ") on " ANSI_BOLD "%s" ANSI_NORMAL " for " ANSI_BOLD "%s" ANSI_NORMAL, + store.printStorePath(build->drvPath), + build->output, + build->buildHost.value_or("unknown host").c_str(), + build->system); + for (auto & [tagName, tagValue] : build->tags) + logger->cout( + " tag " ANSI_BOLD "%s" ANSI_NORMAL ": %s", tagName, filterANSIEscapes(tagValue, true)); + provenance = build->next; + } + + else if (auto flake = std::dynamic_pointer_cast(provenance)) { + // Collapse subpath/tree provenance into the flake provenance for legibility. + auto next = flake->next; + CanonPath flakePath("/flake.nix"); + if (auto subpath = std::dynamic_pointer_cast(next)) { + next = subpath->next; + flakePath = subpath->subpath; + } + if (auto tree = std::dynamic_pointer_cast(next)) { + FlakeRef flakeRef( + fetchers::Input::fromAttrs(fetchSettings, fetchers::jsonToAttrs(*tree->attrs)), + std::string(flakePath.parent().value_or(CanonPath::root).rel())); + logger->cout( + "← %sinstantiated from %sflake output " ANSI_BOLD "%s#%s" ANSI_NORMAL, + flake->pure ? "" : ANSI_RED "impurely" ANSI_NORMAL " ", + flakeRef.input.isLocked(fetchSettings) ? "" : ANSI_RED "unlocked" ANSI_NORMAL " ", + flakeRef.to_string(), + flake->flakeOutput); + break; + } else { + logger->cout("← instantiated from flake output " ANSI_BOLD "%s" ANSI_NORMAL, flake->flakeOutput); + provenance = flake->next; + } + } + + else if (auto tree = std::dynamic_pointer_cast(provenance)) { + auto input = fetchers::Input::fromAttrs(fetchSettings, fetchers::jsonToAttrs(*tree->attrs)); + logger->cout( + "← from %stree " ANSI_BOLD "%s" ANSI_NORMAL, + input.isLocked(fetchSettings) ? 
"" : ANSI_RED "unlocked" ANSI_NORMAL " ", + input.to_string()); + break; + } + + else if (auto subpath = std::dynamic_pointer_cast(provenance)) { + logger->cout("← from file " ANSI_BOLD "%s" ANSI_NORMAL, subpath->subpath.abs()); + provenance = subpath->next; + } + + else if (auto drv = std::dynamic_pointer_cast(provenance)) { + logger->cout("← with derivation metadata"); + std::istringstream stream((*drv->meta).dump(2)); + for (std::string line; std::getline(stream, line);) { + logger->cout(" %s", line); + } + provenance = drv->next; + } + + else if (auto fetchurl = std::dynamic_pointer_cast(provenance)) { + logger->cout("← fetched from URL " ANSI_BOLD "%s" ANSI_NORMAL, fetchurl->url); + break; + } + + else { + // Unknown or unhandled provenance type + auto json = provenance->to_json(); + auto typeIt = json.find("type"); + if (typeIt != json.end() && typeIt->is_string()) + logger->cout("← " ANSI_RED "unknown provenance type '%s'" ANSI_NORMAL, typeIt->get()); + else + logger->cout("← " ANSI_RED "unknown provenance type" ANSI_NORMAL); + break; + } + } + } + + void run(ref store, StorePaths && storePaths) override + { + bool first = true; + + for (auto & storePath : storePaths) { + auto info = store->queryPathInfo(storePath); + if (!first) + logger->cout(""); + first = false; + logger->cout(ANSI_BOLD "%s" ANSI_NORMAL, store->printStorePath(info->path)); + + if (info->provenance) + displayProvenance(*store, storePath, info->provenance); + else + logger->cout(ANSI_RED " (no provenance information available)" ANSI_NORMAL); + } + } +}; + +static auto rCmdProvenanceShow = registerCommand2({"provenance", "show"}); + +/** + * A wrapper around an arbitrary store that intercepts `addToStore()` + * and `addToStoreFromDump()` calls to keep track of added paths. 
+ */ +struct TrackingStore : public Store +{ + ref next; + boost::unordered_flat_set instantiatedPaths; + + TrackingStore(ref next) + : Store(next->config) + , next(next) + { + } + + void addToStore(const ValidPathInfo & info, Source & narSource, RepairFlag repair, CheckSigsFlag checkSigs) override + { + next->addToStore(info, narSource, repair, checkSigs); + instantiatedPaths.insert(info.path); + // FIXME: we should really just disable the path info cache, since the underlying store already does caching. + invalidatePathInfoCacheFor(info.path); + } + + StorePath addToStore( + std::string_view name, + const SourcePath & path, + ContentAddressMethod method, + HashAlgorithm hashAlgo, + const StorePathSet & references, + PathFilter & filter, + RepairFlag repair) override + { + auto storePath = next->addToStore(name, path, method, hashAlgo, references, filter, repair); + instantiatedPaths.insert(storePath); + invalidatePathInfoCacheFor(storePath); + return storePath; + } + + StorePath addToStoreFromDump( + Source & dump, + std::string_view name, + FileSerialisationMethod dumpMethod, + ContentAddressMethod hashMethod, + HashAlgorithm hashAlgo, + const StorePathSet & references, + RepairFlag repair, + std::shared_ptr provenance) override + { + auto storePath = + next->addToStoreFromDump(dump, name, dumpMethod, hashMethod, hashAlgo, references, repair, provenance); + instantiatedPaths.insert(storePath); + invalidatePathInfoCacheFor(storePath); + return storePath; + } + + void queryPathInfoUncached( + const StorePath & path, Callback> callback) noexcept override + { + try { + callback(std::make_shared(*next->queryPathInfo(path))); + } catch (InvalidPath &) { + callback(nullptr); + } catch (...) 
{ + callback.rethrow(); + } + } + + void queryRealisationUncached( + const DrvOutput & output, Callback> callback) noexcept override + { + next->queryRealisation(output, std::move(callback)); + } + + std::optional queryPathFromHashPart(const std::string & hashPart) override + { + return next->queryPathFromHashPart(hashPart); + } + + void registerDrvOutput(const Realisation & output) override + { + next->registerDrvOutput(output); + } + + ref getFSAccessor(bool requireValidPath) override + { + return next->getFSAccessor(requireValidPath); + } + + std::shared_ptr getFSAccessor(const StorePath & path, bool requireValidPath) override + { + return next->getFSAccessor(path, requireValidPath); + } + + std::optional isTrustedClient() override + { + return next->isTrustedClient(); + } +}; + +struct CmdProvenanceVerify : StorePathsCommand +{ + bool noRebuild = false; + + CmdProvenanceVerify() + { + addFlag({ + .longName = "no-rebuild", + .description = "Skip rebuilding derivations to verify reproducibility.", + .handler = {&noRebuild, true}, + }); + } + + std::string description() override + { + return "verify the provenance of store paths"; + } + + std::string doc() override + { + return +#include "provenance-verify.md" + ; + } + + bool verifySourcePath(Store & store, const StorePath & expectedPath, const SourcePath & sourcePath) + { + auto computedPath = fetchToStore2(fetchSettings, store, sourcePath, FetchMode::Copy, expectedPath.name()).first; + if (computedPath != expectedPath) { + logger->cout( + "❌ " ANSI_RED "store path mismatch for source '%s': expected '%s' but got '%s'" ANSI_NORMAL, + sourcePath.to_string(), + store.printStorePath(expectedPath), + store.printStorePath(computedPath)); + return false; + } else { + logger->cout("✅ verified store path for source '%s'", sourcePath.to_string()); + return true; + } + } + + using CheckResult = std::variant< + std::pair>, + std::pair, + std::monostate>; + + std::pair + verify(Store & store, std::optional path, 
std::shared_ptr provenance) + { + if (auto copied = std::dynamic_pointer_cast(provenance)) { + if (!path) { + logger->cout("❌ " ANSI_RED "cannot verify copied provenance without a store path" ANSI_NORMAL); + return {false, std::monostate{}}; + } + bool success = true; + auto fromStore = openStore(copied->from); + auto localInfo = store.queryPathInfo(*path); + auto fromInfo = fromStore->queryPathInfo(*path); + if (localInfo->narHash != fromInfo->narHash) { + logger->cout( + "❌ " ANSI_RED "NAR hash mismatch in origin store '%s': should be '%s' but is '%s'" ANSI_NORMAL, + copied->from, + localInfo->narHash.to_string(HashFormat::SRI, true), + fromInfo->narHash.to_string(HashFormat::SRI, true)); + success = false; + } else + logger->cout("✅ verified NAR hash in origin store '%s'", copied->from); + auto [nextSuccess, result] = verify(store, path, copied->next); + return {success && nextSuccess, std::move(result)}; + } + + else if (auto build = std::dynamic_pointer_cast(provenance)) { + auto success = verify(store, build->drvPath, build->next).first; + + // Verify that `path` is the expected output of the derivation. + auto outputMap = store.queryPartialDerivationOutputMap(build->drvPath); + auto it = outputMap.find(build->output); + if (it == outputMap.end()) { + logger->cout( + "❌ " ANSI_RED "derivation '%s' does not have expected output '%s'" ANSI_NORMAL, + store.printStorePath(build->drvPath), + build->output); + return {false, std::monostate{}}; + } else if (!it->second) { + // Note: this is not an error, should we even print a message? 
+ logger->cout( + "❓ output '%s' of derivation '%s' is not statically known", + build->output, + store.printStorePath(build->drvPath)); + } else if (*it->second != path) { + logger->cout( + "❌ " ANSI_RED "output '%s' of derivation '%s' is '%s', expected '%s'" ANSI_NORMAL, + build->output, + store.printStorePath(build->drvPath), + store.printStorePath(*it->second), + store.printStorePath(*path)); + return {false, std::monostate{}}; + } + + // Do a check rebuild to verify that the derivation + // produces the same output. + if (noRebuild) { + logger->cout( + "⏭️ skipped rebuild of derivation '%s^%s'", store.printStorePath(build->drvPath), build->output); + } else { + try { + store.buildPaths( + {DerivedPath::Built{ + .drvPath = make_ref(SingleDerivedPath::Opaque{build->drvPath}), + .outputs = OutputsSpec::Names{build->output}, + }}, + bmCheck); + logger->cout("✅ rebuilt derivation '%s^%s'", store.printStorePath(build->drvPath), build->output); + } catch (Error & e) { + logger->cout( + "❌ " ANSI_RED "rebuild of derivation '%s^%s' failed: %s" ANSI_NORMAL, + store.printStorePath(build->drvPath), + build->output, + e.what()); + success = false; + } + } + + return {success, std::monostate{}}; + } + + else if (auto flake = std::dynamic_pointer_cast(provenance)) { + // Fetch the flake source. + auto [success, _res] = verify(store, {}, flake->next); + + auto res = std::get_if>(&_res); + if (!res) + return {false, std::monostate{}}; + + // Evaluate the flake output. 
+ flake::LockFlags lockFlags{ + .updateLockFile = false, + .failOnUnlocked = true, + .useRegistries = false, + .allowUnlocked = false, + }; + + if (res->second.path.baseName() != "flake.nix") { + logger->cout( + "❌ " ANSI_RED "expected flake source to be a 'flake.nix' file, but got '%s'" ANSI_NORMAL, + res->second.path.abs()); + return {false, std::monostate{}}; + } + + auto trackingStore = make_ref(getEvalStore()); + + auto evalState = + ref(std::allocate_shared( + traceable_allocator(), + LookupPath{}, + ref(trackingStore), + fetchSettings, + evalSettings, + getStore())); + + InstallableFlake installable{ + nullptr, + evalState, + FlakeRef{std::move(res->first), std::string(res->second.path.parent().value().rel())}, + "." + flake->flakeOutput, + ExtendedOutputsSpec::Default{}, // FIXME: record this in the provenance? + {}, + lockFlags, + {}}; + + // We have to disable the eval cache to ensure that we see which store paths get instantiated. + installable.useEvalCache = false; + + installable.toDerivedPaths(); + + evalState->waitForAllPaths(); + + logger->cout("✅ evaluated '%s#%s'", installable.flakeRef.to_string(true), flake->flakeOutput); + + if (path) { + if (!trackingStore->instantiatedPaths.contains(*path)) { + logger->cout( + "❌ " ANSI_RED "evaluation did not re-instantiate path '%s'" ANSI_NORMAL, + store.printStorePath(*path)); + return {false, std::monostate{}}; + } + + logger->cout("✅ re-instantiated path '%s'", store.printStorePath(*path)); + } + + return {success, std::monostate{}}; + } + + else if (auto tree = std::dynamic_pointer_cast(provenance)) { + auto input = fetchers::Input::fromAttrs(fetchSettings, fetchers::jsonToAttrs(*tree->attrs)); + try { + auto [accessor, final] = input.getAccessor(fetchSettings, store); + if (!input.isLocked(fetchSettings)) + logger->cout("❓ fetched tree '%s', but it's unlocked", input.to_string()); + else + // FIXME: check NAR hash? 
+ logger->cout("✅ fetched tree '%s'", input.to_string()); + + bool success = !path || verifySourcePath(store, *path, SourcePath(accessor, CanonPath::root)); + + return {success, std::make_pair(std::move(final), accessor)}; + } catch (Error & e) { + logger->cout("❌ " ANSI_RED "failed to fetch tree '%s': %s" ANSI_NORMAL, input.to_string(), e.what()); + return {false, std::monostate{}}; + } + } + + else if (auto subpath = std::dynamic_pointer_cast(provenance)) { + auto [success, result] = verify(store, {}, subpath->next); + if (auto p = std::get_if>>(&result)) { + + auto sourcePath = SourcePath(p->second, subpath->subpath); + + if (path && !verifySourcePath(store, *path, sourcePath)) + success = false; + + return {success, std::make_pair(std::move(p->first), std::move(sourcePath))}; + } else + return {false, std::monostate{}}; + } + + else if (auto drv = std::dynamic_pointer_cast(provenance)) + return verify(store, path, drv->next); + + else if (auto fetchurl = std::dynamic_pointer_cast(provenance)) { + if (!path) + return {false, std::monostate{}}; + + auto info = store.queryPathInfo(*path); + + if (!info->ca) { + logger->cout( + "❌ " ANSI_RED "cannot verify URL '%s' without a content address for path '%s'" ANSI_NORMAL, + fetchurl->url, + store.printStorePath(*path)); + return {false, std::monostate{}}; + } + + if (info->ca->method != ContentAddressMethod::Raw::Flat) { + logger->cout( + "❌ " ANSI_RED + "cannot verify URL '%s' with unsupported content address method for path '%s'" ANSI_NORMAL, + fetchurl->url, + store.printStorePath(*path)); + return {false, std::monostate{}}; + } + + HashSink hashSink{info->ca->hash.algo}; + FileTransferRequest req(fetchurl->url); + req.decompress = false; + getFileTransfer()->download(std::move(req), hashSink); + auto hash = hashSink.finish().hash; + + if (hash != info->ca->hash) { + logger->cout( + "❌ " ANSI_RED "hash mismatch for URL '%s': expected '%s' but got '%s'" ANSI_NORMAL, + fetchurl->url, + 
info->ca->hash.to_string(HashFormat::SRI, true), + hash.to_string(HashFormat::SRI, true)); + return {false, std::monostate{}}; + } + + logger->cout("✅ verified hash of URL '%s'", fetchurl->url); + return {true, std::monostate{}}; + } + + else if (!provenance) { + logger->cout("❓ " ANSI_RED "missing further provenance" ANSI_NORMAL); + return {false, std::monostate{}}; + } + + else { + logger->cout("❓ " ANSI_RED "unknown provenance type" ANSI_NORMAL); + return {false, std::monostate{}}; + } + } + + void run(ref store, StorePaths && storePaths) override + { + bool first = true; + bool success = true; + + for (auto & storePath : storePaths) { + auto info = store->queryPathInfo(storePath); + if (!first) + logger->cout(""); + first = false; + logger->cout(ANSI_BOLD "%s" ANSI_NORMAL, store->printStorePath(info->path)); + + if (info->provenance) + success &= verify(*store, storePath, info->provenance).first; + else { + logger->cout(ANSI_RED " (no provenance information available)" ANSI_NORMAL); + success = false; + } + } + + if (!success) + throw Exit(1); + } +}; + +static auto rCmdProvenanceVerify = registerCommand2({"provenance", "verify"}); diff --git a/src/nix/ps.cc b/src/nix/ps.cc new file mode 100644 index 000000000000..9ae9d97bf98c --- /dev/null +++ b/src/nix/ps.cc @@ -0,0 +1,146 @@ +#include "nix/cmd/command.hh" +#include "nix/main/common-args.hh" +#include "nix/main/shared.hh" +#include "nix/store/store-api.hh" +#include "nix/store/store-cast.hh" +#include "nix/store/active-builds.hh" +#include "nix/util/table.hh" +#include "nix/util/terminal.hh" + +#include + +using namespace nix; + +struct CmdPs : MixJSON, StoreCommand +{ + std::string description() override + { + return "list active builds"; + } + + Category category() override + { + return catUtility; + } + + std::string doc() override + { + return +#include "ps.md" + ; + } + + void run(ref store) override + { + auto & tracker = require(*store); + + auto builds = tracker.queryActiveBuilds(); + + if (json) { + 
printJSON(nlohmann::json(builds)); + return; + } + + if (builds.empty()) { + notice("No active builds."); + return; + } + + /* Helper to format user info: show name if available, else UID */ + auto formatUser = [](const UserInfo & user) -> std::string { + return user.name ? *user.name : std::to_string(user.uid); + }; + + Table table; + + /* Add column headers. */ + table.push_back({{"USER"}, {"PID"}, {"CPU", TableCell::Alignment::Right}, {"DERIVATION/COMMAND"}}); + + for (const auto & build : builds) { + /* Calculate CPU time - use cgroup stats if available, otherwise sum process times. */ + std::chrono::microseconds cpuTime = build.utime && build.stime ? *build.utime + *build.stime : [&]() { + std::chrono::microseconds total{0}; + for (const auto & process : build.processes) + total += process.utime.value_or(std::chrono::microseconds(0)) + + process.stime.value_or(std::chrono::microseconds(0)) + + process.cutime.value_or(std::chrono::microseconds(0)) + + process.cstime.value_or(std::chrono::microseconds(0)); + return total; + }(); + + /* Add build summary row. */ + table.push_back( + {formatUser(build.mainUser), + std::to_string(build.mainPid), + {fmt("%.1fs", + std::chrono::duration_cast>(cpuTime) + .count()), + TableCell::Alignment::Right}, + fmt(ANSI_BOLD "%s" ANSI_NORMAL " (wall=%ds)", + store->printStorePath(build.derivation), + time(nullptr) - build.startTime)}); + + if (build.processes.empty()) { + table.push_back( + {formatUser(build.mainUser), + std::to_string(build.mainPid), + {"", TableCell::Alignment::Right}, + fmt("%s" ANSI_ITALIC "(no process info)" ANSI_NORMAL, treeLast)}); + } else { + /* Recover the tree structure of the processes. 
*/ + std::set pids; + for (auto & process : build.processes) + pids.insert(process.pid); + + using Processes = std::set; + std::map children; + Processes rootProcesses; + for (auto & process : build.processes) { + if (pids.contains(process.parentPid)) + children[process.parentPid].insert(&process); + else + rootProcesses.insert(&process); + } + + /* Render the process tree. */ + [&](this auto const & visit, const Processes & processes, std::string_view prefix) -> void { + for (const auto & [n, process] : enumerate(processes)) { + bool last = n + 1 == processes.size(); + + // Format CPU time if available + std::string cpuInfo; + if (process->utime || process->stime || process->cutime || process->cstime) { + auto totalCpu = process->utime.value_or(std::chrono::microseconds(0)) + + process->stime.value_or(std::chrono::microseconds(0)) + + process->cutime.value_or(std::chrono::microseconds(0)) + + process->cstime.value_or(std::chrono::microseconds(0)); + auto totalSecs = + std::chrono::duration_cast>( + totalCpu) + .count(); + cpuInfo = fmt("%.1fs", totalSecs); + } + + // Format argv with tree structure + auto argv = concatStringsSep( + " ", tokenizeString>(concatStringsSep(" ", process->argv))); + + table.push_back( + {formatUser(process->user), + std::to_string(process->pid), + {cpuInfo, TableCell::Alignment::Right}, + fmt("%s%s%s", prefix, last ? treeLast : treeConn, argv)}); + + visit(children[process->pid], last ? prefix + treeNull : prefix + treeLine); + } + }(rootProcesses, ""); + } + } + + auto width = isTTY() && isatty(STDOUT_FILENO) ? 
getWindowWidth() : std::numeric_limits::max(); + + printTable(std::cout, table, width); + } +}; + +static auto rCmdPs = registerCommand2({"ps"}); diff --git a/src/nix/ps.md b/src/nix/ps.md new file mode 100644 index 000000000000..e48a308e6965 --- /dev/null +++ b/src/nix/ps.md @@ -0,0 +1,27 @@ +R"( + +# Examples + +* Show all active builds: + + ```console + # nix ps + USER PID CPU DERIVATION/COMMAND + nixbld11 3534394 110.2s /nix/store/lzvdxlbr6xjd9w8py4nd2y2nnqb9gz7p-nix-util-tests-3.13.2.drv (wall=8s) + nixbld11 3534394 0.8s └───bash -e /nix/store/jwqf79v5p51x9mv8vx20fv9mzm2x7kig-source-stdenv.sh /nix/store/shkw4qm9qcw5sc5n1k5jznc83ny02 + nixbld11 3534751 36.3s └───ninja -j24 + nixbld11 3535637 0.0s ├───/nix/store/0v2jfvx71l1zn14l97pznvbqnhiq3pyd-gcc-14.3.0/bin/g++ -fPIC -fstack-clash-protection -O2 -U_ + nixbld11 3535639 0.1s │ └───/nix/store/0v2jfvx71l1zn14l97pznvbqnhiq3pyd-gcc-14.3.0/libexec/gcc/x86_64-unknown-linux-gnu/14.3. + nixbld11 3535658 0.0s └───/nix/store/0v2jfvx71l1zn14l97pznvbqnhiq3pyd-gcc-14.3.0/bin/g++ -fPIC -fstack-clash-protection -O2 -U_ + nixbld1 3534377 1.8s /nix/store/nh2dx9cqcy9lw4d4rvd0dbsflwdsbzdy-patchelf-0.18.0.drv (wall=5s) + nixbld1 3534377 1.8s └───bash -e /nix/store/xk05lkk4ij6pc7anhdbr81appiqbcb01-default-builder.sh + nixbld1 3535074 0.0s └───/nix/store/21ymxxap3y8hb9ijcfah8ani9cjpv8m6-bash-5.2p37/bin/bash ./configure --disable-dependency-trackin + ``` + +# Description + +This command lists all currently running Nix builds. +For each build, it shows the derivation path and the main process ID. +On Linux and macOS, it also shows the child processes of each build. 
+ +)" diff --git a/src/nix/repl.cc b/src/nix/repl.cc index ea1ecb305aa0..cd790c0b09d2 100644 --- a/src/nix/repl.cc +++ b/src/nix/repl.cc @@ -47,9 +47,9 @@ struct CmdRepl : RawInstallablesCommand std::vector files; - Strings getDefaultFlakeAttrPaths() override + StringSet getRoles() override { - return {""}; + return {"nix-repl"}; } bool forceImpureByDefault() override diff --git a/src/nix/repl.md b/src/nix/repl.md index 32c08e24b240..e608dabf6f9c 100644 --- a/src/nix/repl.md +++ b/src/nix/repl.md @@ -36,7 +36,7 @@ R""( Loading Installable ''... Added 1 variables. - # nix repl --extra-experimental-features 'flakes' nixpkgs + # nix repl nixpkgs Loading Installable 'flake:nixpkgs#'... Added 5 variables. diff --git a/src/nix/run.cc b/src/nix/run.cc index 38dbc42b6815..29c66a242a51 100644 --- a/src/nix/run.cc +++ b/src/nix/run.cc @@ -137,23 +137,9 @@ struct CmdRun : InstallableValueCommand, MixEnvironment ; } - Strings getDefaultFlakeAttrPaths() override + StringSet getRoles() override { - Strings res{ - "apps." + settings.thisSystem.get() + ".default", - "defaultApp." + settings.thisSystem.get(), - }; - for (auto & s : SourceExprCommand::getDefaultFlakeAttrPaths()) - res.push_back(s); - return res; - } - - Strings getDefaultFlakeAttrPathPrefixes() override - { - Strings res{"apps." 
+ settings.thisSystem.get() + "."}; - for (auto & s : SourceExprCommand::getDefaultFlakeAttrPathPrefixes()) - res.push_back(s); - return res; + return {"nix-run"}; } void run(ref store, ref installable) override diff --git a/src/nix/search.cc b/src/nix/search.cc index dac60ceba573..509287b80206 100644 --- a/src/nix/search.cc +++ b/src/nix/search.cc @@ -11,6 +11,7 @@ #include "nix/expr/attr-path.hh" #include "nix/util/hilite.hh" #include "nix/util/strings-inline.hh" +#include "nix/expr/parallel-eval.hh" #include #include @@ -56,9 +57,9 @@ struct CmdSearch : InstallableValueCommand, MixJSON ; } - Strings getDefaultFlakeAttrPaths() override + StringSet getRoles() override { - return {"packages." + settings.thisSystem.get(), "legacyPackages." + settings.thisSystem.get()}; + return {"nix-search"}; } void run(ref store, ref installable) override @@ -84,11 +85,13 @@ struct CmdSearch : InstallableValueCommand, MixJSON auto state = getEvalState(); - std::optional jsonOut; + std::optional> jsonOut; if (json) - jsonOut = json::object(); + jsonOut.emplace(json::object()); - uint64_t results = 0; + std::atomic results = 0; + + FutureVector futures(*state->executor); std::function visit; @@ -96,15 +99,22 @@ struct CmdSearch : InstallableValueCommand, MixJSON auto attrPathS = state->symbols.resolve({attrPath}); auto attrPathStr = attrPath.to_string(*state); + /* Activity act(*logger, lvlInfo, actUnknown, fmt("evaluating '%s'", attrPathStr)); + */ try { auto recurse = [&]() { + Executor::WorkItems work; for (const auto & attr : cursor.getAttrs()) { auto cursor2 = cursor.getAttr(state->symbols[attr]); auto attrPath2(attrPath); attrPath2.push_back(attr); - visit(*cursor2, attrPath2, false); + state->addWork( + work, + std::string_view(state->symbols[attr]).find("Packages") != std::string_view::npos ? 
0 : 2, + [cursor2, attrPath2, visit]() { visit(*cursor2, attrPath2, false); }); } + futures.spawn(std::move(work)); }; if (cursor.isDerivation()) { @@ -147,21 +157,21 @@ struct CmdSearch : InstallableValueCommand, MixJSON if (found) { results++; if (json) { - (*jsonOut)[attrPathStr] = { + (*jsonOut->lock())[attrPathStr] = { {"pname", name.name}, {"version", name.version}, {"description", description}, }; } else { - if (results > 1) - logger->cout(""); - logger->cout( - "* %s%s", - wrap("\e[0;1m", hiliteMatches(attrPathStr, attrPathMatches, ANSI_GREEN, "\e[0;1m")), - optionalBracket(" (", name.version, ")")); + auto out = + fmt("%s* %s%s", + results > 1 ? "\n" : "", + wrap("\e[0;1m", hiliteMatches(attrPathStr, attrPathMatches, ANSI_GREEN, "\e[0;1m")), + optionalBracket(" (", name.version, ")")); if (description != "") - logger->cout( - " %s", hiliteMatches(description, descriptionMatches, ANSI_GREEN, ANSI_NORMAL)); + out += fmt( + "\n %s", hiliteMatches(description, descriptionMatches, ANSI_GREEN, ANSI_NORMAL)); + logger->cout(out); } } } @@ -186,14 +196,20 @@ struct CmdSearch : InstallableValueCommand, MixJSON } }; - for (auto & cursor : installable->getCursors(*state)) - visit(*cursor, cursor->getAttrPath(), true); + Executor::WorkItems work; + for (auto & cursor : installable->getCursors(*state, false)) + state->addWork(work, 1, [cursor, visit]() { visit(*cursor, cursor->getAttrPath(), true); }); + + futures.spawn(std::move(work)); + futures.finishAll(); if (json) - printJSON(*jsonOut); + printJSON(*(jsonOut->lock())); if (!json && !results) throw Error("no results for the given search term(s)!"); + + notice("Found %d matching packages.", results); } }; diff --git a/src/nix/upgrade-nix.cc b/src/nix/upgrade-nix.cc index 513d76e9b7ef..586f82b617fc 100644 --- a/src/nix/upgrade-nix.cc +++ b/src/nix/upgrade-nix.cc @@ -15,14 +15,6 @@ using namespace nix; -/** - * Check whether a path has a "profiles" component. 
- */ -static bool hasProfilesComponent(const std::filesystem::path & path) -{ - return std::ranges::contains(path, OS_STR("profiles")); -} - /** * Settings related to upgrading Nix itself. */ @@ -33,11 +25,12 @@ struct UpgradeSettings : Config */ Setting storePathUrl{ this, - "https://github.com/NixOS/nixpkgs/raw/master/nixos/modules/installer/tools/nix-fallback-paths.nix", + "", "upgrade-nix-store-path-url", R"( - Used by `nix upgrade-nix`, the URL of the file that contains the - store paths of the latest Nix release. + Deprecated. This option was used to configure how `nix upgrade-nix` operated. + + Using this setting has no effect. It will be removed in a future release of Determinate Nix. )"}; }; @@ -47,26 +40,6 @@ static GlobalConfig::Register rSettings(&upgradeSettings); struct CmdUpgradeNix : MixDryRun, StoreCommand { - std::filesystem::path profileDir; - - CmdUpgradeNix() - { - addFlag({ - .longName = "profile", - .shortName = 'p', - .description = "The path to the Nix profile to upgrade.", - .labels = {"profile-dir"}, - .handler = {&profileDir}, - }); - - addFlag({ - .longName = "nix-store-paths-url", - .description = "The URL of the file that contains the store paths of the latest Nix release.", - .labels = {"url"}, - .handler = {&(std::string &) upgradeSettings.storePathUrl}, - }); - } - /** * This command is stable before the others */ @@ -77,7 +50,7 @@ struct CmdUpgradeNix : MixDryRun, StoreCommand std::string description() override { - return "upgrade Nix to the latest stable version"; + return "deprecated in favor of determinate-nixd upgrade"; } std::string doc() override @@ -94,118 +67,9 @@ struct CmdUpgradeNix : MixDryRun, StoreCommand void run(ref store) override { - evalSettings.pureEval = true; - - if (profileDir == "") - profileDir = getProfileDir(store); - - printInfo("upgrading Nix in profile %s", PathFmt(profileDir)); - - auto storePath = getLatestNix(store); - - auto version = DrvName(storePath.name()).version; - - if (dryRun) { - 
logger->stop(); - warn("would upgrade to version %s", version); - return; - } - - { - Activity act(*logger, lvlInfo, actUnknown, fmt("downloading '%s'...", store->printStorePath(storePath))); - store->ensurePath(storePath); - } - - { - Activity act( - *logger, lvlInfo, actUnknown, fmt("verifying that '%s' works...", store->printStorePath(storePath))); - auto program = store->printStorePath(storePath) + "/bin/nix-env"; - auto s = runProgram(program, false, {OS_STR("--version")}); - if (s.find("Nix") == std::string::npos) - throw Error("could not verify that '%s' works", program); - } - - logger->stop(); - - { - Activity act( - *logger, - lvlInfo, - actUnknown, - fmt("installing '%s' into profile %s...", store->printStorePath(storePath), PathFmt(profileDir))); - - // FIXME: don't call an external process. - runProgram( - getNixBin("nix-env"), - false, - { - OS_STR("--profile"), - profileDir.native(), - OS_STR("-i"), - string_to_os_string(store->printStorePath(storePath)), - OS_STR("--no-sandbox"), - }); - } - - printInfo(ANSI_GREEN "upgrade to version %s done" ANSI_NORMAL, version); - } - - /* Return the profile in which Nix is installed. */ - std::filesystem::path getProfileDir(ref store) - { - auto whereOpt = ExecutablePath::load().findName(OS_STR("nix-env")); - if (!whereOpt) - throw Error("couldn't figure out how Nix is installed, so I can't upgrade it"); - const auto & where = whereOpt->parent_path(); - - printInfo("found Nix in %s", PathFmt(where)); - - if (hasPrefix(where.string(), "/run/current-system")) - throw Error("Nix on NixOS must be upgraded via 'nixos-rebuild'"); - - auto profileDir = where.parent_path(); - - // Chase symlinks until we find a path under a "profiles" - // directory, or we run out of symlinks. - auto resolved = profileDir; - while (!hasProfilesComponent(canonPath(resolved)) && std::filesystem::is_symlink(resolved)) - // Note that operator/ replaces lhs when rhs is absolute. 
- resolved = resolved.parent_path() / readLink(resolved); - printInfo("found profile %s", PathFmt(resolved)); - - if (std::filesystem::exists(profileDir / "manifest.json")) - throw Error( - "directory %s is managed by 'nix profile' and currently cannot be upgraded by 'nix upgrade-nix'", - PathFmt(profileDir)); - - if (!std::filesystem::exists(profileDir / "manifest.nix")) - throw Error("directory %s does not appear to be part of a Nix profile", PathFmt(profileDir)); - - auto userEnv = store->followLinksToStorePath(profileDir.string()); - - if (!store->isValidPath(userEnv)) - throw Error("directory %s is not in the Nix store", PathFmt(profileDir)); - - return profileDir; - } - - /* Return the store path of the latest stable Nix. */ - StorePath getLatestNix(ref store) - { - Activity act(*logger, lvlInfo, actUnknown, "querying latest Nix version"); - - // FIXME: use nixos.org? - auto req = FileTransferRequest(parseURL(upgradeSettings.storePathUrl.get())); - auto res = getFileTransfer()->download(req); - - auto state = std::make_shared(LookupPath{}, store, fetchSettings, evalSettings); - auto v = state->allocValue(); - state->eval(state->parseExprFromString(res.data, state->rootPath(CanonPath("/no-such-path"))), *v); - Bindings & bindings = Bindings::emptyBindings; - auto v2 = findAlongAttrPath(*state, settings.thisSystem, bindings, *v).first; - - return store->parseStorePath( - state->forceString(*v2, noPos, "while evaluating the path tho latest nix version")); + throw Error( + "The upgrade-nix command isn't available in Determinate Nix; use %s instead", + "sudo determinate-nixd upgrade"); } }; diff --git a/src/nix/upgrade-nix.md b/src/nix/upgrade-nix.md index 3a3bf61b9b05..bb5157175826 100644 --- a/src/nix/upgrade-nix.md +++ b/src/nix/upgrade-nix.md @@ -1,33 +1,11 @@ R""( -# Examples - -* Upgrade Nix to the stable version declared in Nixpkgs: - - ```console - # nix upgrade-nix - ``` - -* Upgrade Nix in a specific profile: - - ```console - # nix upgrade-nix --profile 
~alice/.local/state/nix/profiles/profile - ``` - # Description -This command upgrades Nix to the stable version. - -By default, the latest stable version is defined by Nixpkgs, in -[nix-fallback-paths.nix](https://github.com/NixOS/nixpkgs/raw/master/nixos/modules/installer/tools/nix-fallback-paths.nix) -and updated manually. It may not always be the latest tagged release. - -By default, it locates the directory containing the `nix` binary in the `$PATH` -environment variable. If that directory is a Nix profile, it will -upgrade the `nix` package in that profile to the latest stable binary -release. +This command isn't available in Determinate Nix but is present in order to guide +users to the new upgrade path. -You cannot use this command to upgrade Nix in the system profile of a -NixOS system (that is, if `nix` is found in `/run/current-system`). +Use `sudo determinate-nixd upgrade` to upgrade Determinate Nix on systems that manage it imperatively. +In practice, this is any system that isn't running NixOS. )"" diff --git a/src/perl/lib/Nix/Store.xs b/src/perl/lib/Nix/Store.xs index 6c4bf2e7051d..505faf0279cb 100644 --- a/src/perl/lib/Nix/Store.xs +++ b/src/perl/lib/Nix/Store.xs @@ -234,7 +234,7 @@ StoreWrapper::exportPaths(int fd, ...) 
StorePathSet paths; for (int n = 2; n < items; ++n) paths.insert(THIS->store->parseStorePath(SvPV_nolen(ST(n)))); FdSink sink(fd); - exportPaths(*THIS->store, paths, sink); + exportPaths(*THIS->store, paths, sink, 1); } catch (Error & e) { croak("%s", e.what()); } diff --git a/src/perl/lib/Nix/meson.build b/src/perl/lib/Nix/meson.build index dd5560e21cc5..675d33b63710 100644 --- a/src/perl/lib/Nix/meson.build +++ b/src/perl/lib/Nix/meson.build @@ -38,13 +38,19 @@ nix_perl_store_cc = custom_target( command : [ xsubpp, '@INPUT@', '-output', '@OUTPUT@' ], ) +if host_machine.system() == 'darwin' and get_option('default_library') == 'static' + prelink = false +else + prelink = true +endif + # Build Nix::Store Library #------------------------------------------------- nix_perl_store_lib = library( 'Store', sources : nix_perl_store_cc, name_prefix : '', - prelink : true, # For C++ static initializers + prelink : prelink, # For C++ static initializers install : true, install_mode : 'rwxr-xr-x', install_dir : join_paths(nix_perl_install_dir, 'auto', 'Nix', 'Store'), diff --git a/src/perl/package.nix b/src/perl/package.nix index e25b2996c83c..21485091fdd9 100644 --- a/src/perl/package.nix +++ b/src/perl/package.nix @@ -18,7 +18,7 @@ in perl.pkgs.toPerlModule ( mkMesonDerivation (finalAttrs: { - pname = "nix-perl"; + pname = "determinate-nix-perl"; inherit version; workDir = ./.; diff --git a/tests/functional/binary-cache.sh b/tests/functional/binary-cache.sh index 58025b789737..68263459337e 100755 --- a/tests/functional/binary-cache.sh +++ b/tests/functional/binary-cache.sh @@ -15,6 +15,7 @@ nix-instantiate --store "file://$cacheDir" dependencies.nix clearStore clearCache outPath=$(nix-build dependencies.nix --no-out-link) +depPath=$(nix-build dependencies.nix -A input0_drv --no-out-link) nix copy --to "file://$cacheDir" "$outPath" @@ -86,6 +87,19 @@ export _NIX_FORCE_HTTP=1 basicDownloadTests +# Test that multiple concurrent substitutions do only one download. 
+clearStore +nix-store --init # needed because concurrent creation of the store can give SQLite errors +_NIX_TEST_CONCURRENT_SUBSTITUTION=1 nix-store -r "$depPath" --substituters "file://$cacheDir" --no-require-sigs -vvvv 2> "$TEST_ROOT/log1" & +pid1="$!" +_NIX_TEST_CONCURRENT_SUBSTITUTION=1 nix-store -r "$depPath" --substituters "file://$cacheDir" --no-require-sigs -vvvv 2> "$TEST_ROOT/log2" & +pid2="$!" +wait "$pid1" +wait "$pid2" +[[ $(cat "$TEST_ROOT/log1" "$TEST_ROOT/log2" | grep -c "copying path ") -eq 2 ]] +[[ $(cat "$TEST_ROOT/log1" "$TEST_ROOT/log2" | grep -c "downloading.*nar.xz") -eq 1 ]] + + # Test whether Nix notices if the NAR doesn't match the hash in the NAR info. clearStore diff --git a/tests/functional/build.sh b/tests/functional/build.sh index 0e76f949f55d..fdef4d5f7cee 100755 --- a/tests/functional/build.sh +++ b/tests/functional/build.sh @@ -158,33 +158,22 @@ printf "" | nix build --no-link --stdin --json | jq --exit-status '. == []' printf "%s\n" "$drv^*" | nix build --no-link --stdin --json | jq --exit-status '.[0]|has("drvPath")' # --keep-going and FOD -if isDaemonNewer "2.34pre"; then - # With the fix, cancelled goals are not reported as failures. - # Use -j1 so only x1 starts and fails; x2, x3, x4 are cancelled. - out="$(nix build -f fod-failing.nix -j1 -L 2>&1)" && status=0 || status=$? - test "$status" = 1 - # Only the hash mismatch error for x1. Cancelled goals not reported. - test "$(<<<"$out" grep -cE '^error:')" = 1 - # Regression test: error messages should not be empty (end with just "failed:") - <<<"$out" grepQuietInverse -E "^error:.*failed: *$" -else - out="$(nix build -f fod-failing.nix -L 2>&1)" && status=0 || status=$? - test "$status" = 1 - # At minimum, check that x1 is reported as failing - <<<"$out" grepQuiet -E "error:.*-x1" -fi +out="$(nix build -f fod-failing.nix -j1 -L 2>&1)" && status=0 || status=$? +test "$status" = 1 +# Only the hash mismatch error for the first failing goal (x1). 
+# The other goals (x2, x3, x4) are cancelled and not reported as failures. +test "$(<<<"$out" grep -cE '^error:')" = 1 <<<"$out" grepQuiet -E "hash mismatch in fixed-output derivation '.*-x1\\.drv'" <<<"$out" grepQuiet -vE "hash mismatch in fixed-output derivation '.*-x3\\.drv'" <<<"$out" grepQuiet -vE "hash mismatch in fixed-output derivation '.*-x2\\.drv'" out="$(nix build -f fod-failing.nix -L x1 x2 x3 --keep-going 2>&1)" && status=0 || status=$? test "$status" = 1 -# three "hash mismatch" errors - for each failing fod, one "build of ... failed" -test "$(<<<"$out" grep -cE '^error:')" = 4 +# three "hash mismatch" errors - for each failing fod +test "$(<<<"$out" grep -cE '^error:')" = 3 <<<"$out" grepQuiet -E "hash mismatch in fixed-output derivation '.*-x1\\.drv'" <<<"$out" grepQuiet -E "hash mismatch in fixed-output derivation '.*-x3\\.drv'" <<<"$out" grepQuiet -E "hash mismatch in fixed-output derivation '.*-x2\\.drv'" -<<<"$out" grepQuiet -E "error: build of '.*-x[1-3]\\.drv\\^out', '.*-x[1-3]\\.drv\\^out', '.*-x[1-3]\\.drv\\^out' failed" out="$(nix build -f fod-failing.nix -L x4 2>&1)" && status=0 || status=$? 
test "$status" = 1 diff --git a/tests/functional/ca/build-with-garbage-path.sh b/tests/functional/ca/build-with-garbage-path.sh index 298cd469a928..b610f8e2b1c8 100755 --- a/tests/functional/ca/build-with-garbage-path.sh +++ b/tests/functional/ca/build-with-garbage-path.sh @@ -9,7 +9,7 @@ requireDaemonNewerThan "2.4pre20210621" # Get the output path of `rootCA`, and put some garbage instead outPath="$(nix-build ./content-addressed.nix -A rootCA --no-out-link)" # shellcheck disable=SC2046 # Multiple store paths need to become individual args -nix-store --delete $(nix-store -q --referrers-closure "$outPath") +nix-store --delete $(nix-store -q --referrers-closure "$outPath") --ignore-liveness touch "$outPath" # The build should correctly remove the garbage and put the expected path instead diff --git a/tests/functional/ca/derivation-json.sh b/tests/functional/ca/derivation-json.sh index f94c5a72c3f0..eb1d949676a7 100644 --- a/tests/functional/ca/derivation-json.sh +++ b/tests/functional/ca/derivation-json.sh @@ -19,7 +19,7 @@ drvPath3=$(nix derivation add --dry-run < "$TEST_HOME"/foo.json) [[ ! 
-e "$drvPath3" ]] # But the JSON is rejected without the experimental feature -expectStderr 1 nix derivation add < "$TEST_HOME"/foo.json --experimental-features nix-command | grepQuiet "experimental Nix feature 'ca-derivations' is disabled" +expectStderr 1 nix derivation add < "$TEST_HOME"/foo.json --experimental-features '' | grepQuiet "experimental Nix feature 'ca-derivations' is disabled" # Without --dry-run it is actually written drvPath4=$(nix derivation add < "$TEST_HOME"/foo.json) diff --git a/tests/functional/ca/selfref-gc.sh b/tests/functional/ca/selfref-gc.sh index 7ac9ec9f78d0..fdee6b07ab09 100755 --- a/tests/functional/ca/selfref-gc.sh +++ b/tests/functional/ca/selfref-gc.sh @@ -4,7 +4,7 @@ source common.sh requireDaemonNewerThan "2.4pre20210626" -enableFeatures "ca-derivations nix-command flakes" +enableFeatures "ca-derivations" export NIX_TESTS_CA_BY_DEFAULT=1 cd .. diff --git a/tests/functional/common/functions.sh b/tests/functional/common/functions.sh index 771bbca785bc..3e3aeef3ddcf 100644 --- a/tests/functional/common/functions.sh +++ b/tests/functional/common/functions.sh @@ -73,6 +73,7 @@ startDaemon() { fi # Start the daemon, wait for the socket to appear. rm -f "$NIX_DAEMON_SOCKET_PATH" + # TODO: remove the nix-command feature when we're no longer testing against old daemons. PATH=$DAEMON_PATH nix --extra-experimental-features 'nix-command' daemon & _NIX_TEST_DAEMON_PID=$! 
export _NIX_TEST_DAEMON_PID @@ -132,11 +133,11 @@ restartDaemon() { } isDaemonNewer () { - [[ -n "${NIX_DAEMON_PACKAGE:-}" ]] || return 0 - local requiredVersion="$1" - local daemonVersion - daemonVersion=$("$NIX_DAEMON_PACKAGE/bin/nix" daemon --version | cut -d' ' -f3) - [[ $(nix eval --expr "builtins.compareVersions ''$daemonVersion'' ''$requiredVersion''") -ge 0 ]] + [[ -n "${NIX_DAEMON_PACKAGE:-}" ]] || return 0 + local requiredVersion="$1" + local daemonVersion + daemonVersion=$("$NIX_DAEMON_PACKAGE/bin/nix" daemon --version | sed 's/.*) //') + [[ $(nix eval --expr "builtins.compareVersions ''$daemonVersion'' ''$requiredVersion''") -ge 0 ]] } skipTest () { diff --git a/tests/functional/common/init.sh b/tests/functional/common/init.sh index 66b44c76f696..9b2bed678ec4 100755 --- a/tests/functional/common/init.sh +++ b/tests/functional/common/init.sh @@ -3,6 +3,9 @@ # for shellcheck : "${test_nix_conf_dir?}" "${test_nix_conf?}" +# Don't upload crashes from tests to Sentry. +export NIX_SENTRY_ENDPOINT= + if isTestOnNixOS; then mkdir -p "$test_nix_conf_dir" "$TEST_HOME" @@ -12,7 +15,7 @@ if isTestOnNixOS; then ! test -e "$test_nix_conf" cat > "$test_nix_conf" < "$NIX_CONF_DIR"/nix.conf < "$NIX_CONF_DIR"/nix.conf.extra <"$TEST_ROOT"/stdout 2>"$TEST_ROOT"/stderr +xpFeature=auto-allocate-uids +gatedSetting=auto-allocate-uids + +# Experimental feature is disabled before, ignore and warn. 
+NIX_CONFIG=" + experimental-features = + $gatedSetting = true +" expect 1 nix config show $gatedSetting 1>"$TEST_ROOT"/stdout 2>"$TEST_ROOT"/stderr [[ $(cat "$TEST_ROOT/stdout") = '' ]] -grepQuiet "Ignoring setting 'accept-flake-config' because experimental feature 'flakes' is not enabled" "$TEST_ROOT/stderr" -grepQuiet "error: could not find setting 'accept-flake-config'" "$TEST_ROOT/stderr" +grepQuiet "error: could not find setting '$gatedSetting'" "$TEST_ROOT/stderr" -# 'flakes' experimental-feature is disabled after, ignore and warn -NIX_CONFIG=' - accept-flake-config = true - experimental-features = nix-command -' expect 1 nix config show accept-flake-config 1>"$TEST_ROOT"/stdout 2>"$TEST_ROOT"/stderr +# Experimental feature is disabled after, ignore and warn. +NIX_CONFIG=" + $gatedSetting = true + experimental-features = +" expect 1 nix config show $gatedSetting 1>"$TEST_ROOT"/stdout 2>"$TEST_ROOT"/stderr [[ $(cat "$TEST_ROOT/stdout") = '' ]] -grepQuiet "Ignoring setting 'accept-flake-config' because experimental feature 'flakes' is not enabled" "$TEST_ROOT/stderr" -grepQuiet "error: could not find setting 'accept-flake-config'" "$TEST_ROOT/stderr" +grepQuiet "error: could not find setting '$gatedSetting'" "$TEST_ROOT/stderr" -# 'flakes' experimental-feature is enabled before, process -NIX_CONFIG=' - experimental-features = nix-command flakes - accept-flake-config = true -' nix config show accept-flake-config 1>"$TEST_ROOT"/stdout 2>"$TEST_ROOT"/stderr +# Experimental feature is enabled before, process. 
+NIX_CONFIG=" + experimental-features = $xpFeature + $gatedSetting = true +" nix config show $gatedSetting 1>"$TEST_ROOT"/stdout 2>"$TEST_ROOT"/stderr grepQuiet "true" "$TEST_ROOT/stdout" -grepQuietInverse "Ignoring setting 'accept-flake-config'" "$TEST_ROOT/stderr" -# 'flakes' experimental-feature is enabled after, process -NIX_CONFIG=' - accept-flake-config = true - experimental-features = nix-command flakes -' nix config show accept-flake-config 1>"$TEST_ROOT"/stdout 2>"$TEST_ROOT"/stderr +# Experimental feature is enabled after, process. +NIX_CONFIG=" + $gatedSetting = true + experimental-features = $xpFeature +" nix config show $gatedSetting 1>"$TEST_ROOT"/stdout 2>"$TEST_ROOT"/stderr grepQuiet "true" "$TEST_ROOT/stdout" -grepQuietInverse "Ignoring setting 'accept-flake-config'" "$TEST_ROOT/stderr" +grepQuietInverse "Ignoring setting '$gatedSetting'" "$TEST_ROOT/stderr" function exit_code_both_ways { - expect 1 nix --experimental-features 'nix-command' "$@" 1>/dev/null - nix --experimental-features 'nix-command flakes' "$@" 1>/dev/null + expect 1 nix --experimental-features '' "$@" 1>/dev/null + nix --experimental-features "$xpFeature" "$@" 1>/dev/null # Also, the order should not matter - expect 1 nix "$@" --experimental-features 'nix-command' 1>/dev/null - nix "$@" --experimental-features 'nix-command flakes' 1>/dev/null + expect 1 nix "$@" --experimental-features '' 1>/dev/null + nix "$@" --experimental-features "$xpFeature" 1>/dev/null } -exit_code_both_ways show-config --flake-registry 'https://no' +exit_code_both_ways config show --auto-allocate-uids # Double check these are stable nix --experimental-features '' --help 1>/dev/null nix --experimental-features '' doctor --help 1>/dev/null nix --experimental-features '' repl --help 1>/dev/null nix --experimental-features '' upgrade-nix --help 1>/dev/null - -# These 3 arguments are currently given to all commands, which is wrong (as not -# all care). 
To deal with fixing later, we simply make them require the -# nix-command experimental features --- it so happens that the commands we wish -# stabilizing to do not need them anyways. -for arg in '--print-build-logs' '--offline' '--refresh'; do - nix --experimental-features 'nix-command' "$arg" --help 1>/dev/null - expect 1 nix --experimental-features '' "$arg" --help 1>/dev/null -done diff --git a/tests/functional/export.sh b/tests/functional/export.sh index 53bbdd9ac39e..a74efa91d806 100755 --- a/tests/functional/export.sh +++ b/tests/functional/export.sh @@ -7,18 +7,24 @@ TODO_NixOS clearStore outPath=$(nix-build dependencies.nix --no-out-link) +drvPath=$(nix path-info --json "$outPath" | jq -r .\""$outPath"\".deriver) nix-store --export "$outPath" > "$TEST_ROOT"/exp +expectStderr 1 nix nario export "$outPath" | grepQuiet "required argument.*missing" +nix nario export --format 1 "$outPath" > "$TEST_ROOT/exp2" +cmp "$TEST_ROOT/exp" "$TEST_ROOT/exp2" # shellcheck disable=SC2046 nix-store --export $(nix-store -qR "$outPath") > "$TEST_ROOT"/exp_all +nix nario export --format 1 -r "$outPath" > "$TEST_ROOT"/exp_all2 +cmp "$TEST_ROOT/exp_all" "$TEST_ROOT/exp_all2" + if nix-store --export "$outPath" >/dev/full ; then echo "exporting to a bad file descriptor should fail" exit 1 fi - clearStore if nix-store --import < "$TEST_ROOT"/exp; then @@ -26,7 +32,6 @@ if nix-store --import < "$TEST_ROOT"/exp; then exit 1 fi - clearStore nix-store --import < "$TEST_ROOT"/exp_all @@ -34,9 +39,42 @@ nix-store --import < "$TEST_ROOT"/exp_all # shellcheck disable=SC2046 nix-store --export $(nix-store -qR "$outPath") > "$TEST_ROOT"/exp_all2 - clearStore # Regression test: the derivers in exp_all2 are empty, which shouldn't # cause a failure. nix-store --import < "$TEST_ROOT"/exp_all2 + +# Test `nix nario import` on files created by `nix-store --export`. 
+clearStore +expectStderr 1 nix nario import < "$TEST_ROOT"/exp_all | grepQuiet "lacks a signature" +nix nario import --no-check-sigs < "$TEST_ROOT"/exp_all +nix path-info "$outPath" + +# Test `nix nario list`. +nix nario list < "$TEST_ROOT"/exp_all +nix nario list < "$TEST_ROOT"/exp_all | grepQuiet ".*dependencies-input-0.*bytes" +nix nario list -lR < "$TEST_ROOT"/exp_all | grepQuiet "dr-xr-xr-x .*0 $outPath" +nix nario list -lR < "$TEST_ROOT"/exp_all | grepQuiet "lrwxrwxrwx .*0 $outPath/self -> $outPath" +nix nario list -lR < "$TEST_ROOT"/exp_all | grepQuiet -- "-r--r--r-- .*7 $outPath/foobar" + +# Test format 2 (including signatures). +nix key generate-secret --key-name my-key > "$TEST_ROOT"/secret +public_key=$(nix key convert-secret-to-public < "$TEST_ROOT"/secret) +nix store sign --key-file "$TEST_ROOT/secret" -r "$outPath" +nix nario export --format 2 -r "$outPath" > "$TEST_ROOT"/exp_all +clearStore +expectStderr 1 nix nario import < "$TEST_ROOT"/exp_all | grepQuiet "lacks a signature" +nix nario import --trusted-public-keys "$public_key" < "$TEST_ROOT"/exp_all +[[ $(nix path-info --json "$outPath" | jq -r .[].signatures[]) =~ my-key: ]] + +# Test json listing. 
+json=$(nix nario list --json -R < "$TEST_ROOT/exp_all") +[[ $(printf "%s" "$json" | jq -r ".paths.\"$outPath\".deriver") = "$drvPath" ]] +[[ $(printf "%s" "$json" | jq -r ".paths.\"$outPath\".contents.type") = directory ]] +[[ $(printf "%s" "$json" | jq -r ".paths.\"$outPath\".contents.entries.foobar.type") = regular ]] +[[ $(printf "%s" "$json" | jq ".paths.\"$outPath\".contents.entries.foobar.size") = 7 ]] + +json=$(nix nario list --json < "$TEST_ROOT/exp_all") +[[ $(printf "%s" "$json" | jq -r ".paths.\"$outPath\".deriver") = "$drvPath" ]] +[[ $(printf "%s" "$json" | jq -r ".paths.\"$outPath\".contents.type") = null ]] diff --git a/tests/functional/fetchGit.sh b/tests/functional/fetchGit.sh index 2992020f16a8..949c7eb8540a 100755 --- a/tests/functional/fetchGit.sh +++ b/tests/functional/fetchGit.sh @@ -309,3 +309,71 @@ git -C "$empty" config user.name "Foobar" git -C "$empty" commit --allow-empty --allow-empty-message --message "" nix eval --impure --expr "let attrs = builtins.fetchGit $empty; in assert attrs.lastModified != 0; assert attrs.rev != \"0000000000000000000000000000000000000000\"; assert attrs.revCount == 1; true" + +# Test backward compatibility hack for Nix < 2.20 locks / fetchTree calls that expect Git filters to be applied. 
+eol="$TEST_ROOT/git-eol" +createGitRepo "$eol" +mkdir -p "$eol/dir" +printf "Hello\nWorld\n" > "$eol/dir/crlf" +printf "ignore me" > "$eol/dir/ignored" +git -C "$eol" add dir/crlf dir/ignored +git -C "$eol" commit -a -m Initial +echo "Version: \$Format:%s\$" > "$eol/dir/version" +printf "crlf text eol=crlf\nignored export-ignore\nversion export-subst\n" > "$eol/dir/.gitattributes" +git -C "$eol" add dir/.gitattributes dir/version +git -C "$eol" commit -a -m 'Apply gitattributes' + +rev="$(git -C "$eol" rev-parse HEAD)" + +export _NIX_TEST_BARF_ON_UNCACHEABLE=1 + +oldHash="sha256-fccLx4BSC7e/PzQM4JnixstJQnd4dzgm73BqKhV3KRs=" +newHash="sha256-Ns7sLZOvpacagAPNun1+jBovMpo+zM7PUJ9x+lW3cIU=" + +expectStderr 0 nix eval --expr \ + "let tree = builtins.fetchTree { type = \"git\"; url = \"file://$eol\"; rev = \"$rev\"; narHash = \"$oldHash\"; }; in assert builtins.readFile \"\${tree}/dir/crlf\" == \"Hello\r\nWorld\r\n\"; assert !builtins.pathExists \"\${tree}/dir/ignored\"; assert builtins.readFile \"\${tree}/dir/version\" == \"Version: Apply gitattributes\n\"; true" \ + | grepQuiet "Please update the NAR hash to '$newHash'" + +nix eval --expr \ + "let tree = builtins.fetchTree { type = \"git\"; url = \"file://$eol\"; rev = \"$rev\"; narHash = \"$newHash\"; }; in assert builtins.readFile \"\${tree}/dir/crlf\" == \"Hello\nWorld\n\"; assert builtins.pathExists \"\${tree}/dir/ignored\"; assert builtins.readFile \"\${tree}/dir/version\" == \"Version: \$Format:%s\$\n\"; true" + +expectStderr 102 nix eval --expr \ + "builtins.fetchTree { type = \"git\"; url = \"file://$eol\"; rev = \"$rev\"; narHash = \"sha256-DLDvcwdcwCxnuPTxSQ6gLAyopB20lD0bOQoQB3i2hsA=\"; }" \ + | grepQuiet "NAR hash mismatch" + +mkdir -p "$TEST_ROOT"/flake +cat > "$TEST_ROOT"/flake/flake.nix << EOF +{ + inputs.eol = { type = "git"; url = "file://$eol"; rev = "$rev"; flake = false; }; + outputs = { self, eol }: rec { + crlf = builtins.readFile "\${eol}/dir/crlf"; + isLegacy = assert crlf == 
"Hello\r\nWorld\r\n"; true; + isModern = assert crlf == "Hello\nWorld\n"; true; + }; +} +EOF + +# Test locking with Nix < 2.20 semantics (i.e. using `git archive`). +nix eval --nix-219-compat "path:$TEST_ROOT/flake"#isLegacy +nix eval "path:$TEST_ROOT/flake"#isLegacy +[[ $(jq -r .nodes.eol.locked.narHash < "$TEST_ROOT"/flake/flake.lock) = "$oldHash" ]] + +# Test locking with Nix >= 2.20 semantics (i.e. using libgit2). +rm "$TEST_ROOT"/flake/flake.lock +nix eval "path:$TEST_ROOT/flake"#isModern +nix eval --nix-219-compat "path:$TEST_ROOT/flake"#isModern +[[ $(jq -r .nodes.eol.locked.narHash < "$TEST_ROOT"/flake/flake.lock) = "$newHash" ]] + + +# Test that builtins.hashString devirtualizes lazy paths (https://github.com/DeterminateSystems/determinate/issues/160). +hashStringRepo="$TEST_ROOT/hashString" +createGitRepo "$hashStringRepo" +echo hello > "$hashStringRepo"/hello +git -C "$hashStringRepo" add hello +git -C "$hashStringRepo" commit -m 'Initial' +hashStringRev=$(git -C "$hashStringRepo" rev-parse HEAD) + +hash1=$(nix eval --lazy-trees --raw --expr "builtins.hashString \"sha256\" (toString ((builtins.fetchGit { url = file://$hashStringRepo; rev = \"$hashStringRev\"; })))") +hash2=$(nix eval --lazy-trees --raw --expr "builtins.hashString \"sha256\" (toString ((builtins.fetchGit { url = file://$hashStringRepo; rev = \"$hashStringRev\"; })))") + +[[ "$hash1" = "$hash2" ]] diff --git a/tests/functional/fetchPath.sh b/tests/functional/fetchPath.sh index 1df895b61662..2784afb0388e 100755 --- a/tests/functional/fetchPath.sh +++ b/tests/functional/fetchPath.sh @@ -3,9 +3,9 @@ source common.sh touch "$TEST_ROOT/foo" -t 202211111111 -# We only check whether 2022-11-1* **:**:** is the last modified date since -# `lastModified` is transformed into UTC in `builtins.fetchTarball`. -[[ "$(nix eval --impure --raw --expr "(builtins.fetchTree \"path://$TEST_ROOT/foo\").lastModifiedDate")" =~ 2022111.* ]] + +# The path fetcher does not return lastModified. 
+[[ "$(nix eval --impure --expr "(builtins.fetchTree \"path://$TEST_ROOT/foo\") ? lastModifiedDate")" = false ]] # Check that we can override lastModified for "path:" inputs. [[ "$(nix eval --impure --expr "(builtins.fetchTree { type = \"path\"; path = \"$TEST_ROOT/foo\"; lastModified = 123; }).lastModified")" = 123 ]] diff --git a/tests/functional/fetchurl.sh b/tests/functional/fetchurl.sh index c25ac321668a..96d46abf4684 100755 --- a/tests/functional/fetchurl.sh +++ b/tests/functional/fetchurl.sh @@ -71,7 +71,7 @@ echo "$outPath" | grepQuiet 'xyzzy' test -x "$outPath/fetchurl.sh" test -L "$outPath/symlink" -nix-store --delete "$outPath" +nix-store --delete "$outPath" --ignore-liveness # Test unpacking a compressed NAR. narxz="$TEST_ROOT/archive.nar.xz" diff --git a/tests/functional/fib.wasm b/tests/functional/fib.wasm new file mode 100644 index 000000000000..4ea3dd40f9e2 Binary files /dev/null and b/tests/functional/fib.wasm differ diff --git a/tests/functional/fib.wat b/tests/functional/fib.wat new file mode 100644 index 000000000000..3f39a08902b1 --- /dev/null +++ b/tests/functional/fib.wat @@ -0,0 +1,35 @@ +(module + ;; Import host functions from the Nix env module + (import "env" "get_int" (func $get_int (param i32) (result i64))) + (import "env" "make_int" (func $make_int (param i64) (result i32))) + + ;; The host requires an exported memory to read/write data + (memory (export "memory") 1) + + ;; Called once when the module is instantiated; nothing to initialize here + (func (export "nix_wasm_init_v1")) + + ;; Pure wasm: compute fib(n) recursively + ;; fib(0) = 1 + ;; fib(1) = 1 + ;; fib(n) = fib(n-1) + fib(n-2) + (func $fib (param $n i64) (result i64) + (if (i64.le_s (local.get $n) (i64.const 1)) + (then (return (i64.const 1))) + ) + (i64.add + (call $fib (i64.sub (local.get $n) (i64.const 1))) + (call $fib (i64.sub (local.get $n) (i64.const 2))) + ) + ) + + ;; Entry point: receives ValueId of the input integer, returns ValueId of fib(n) + ;; Type: fn(arg: 
u32) -> u32 (ValueId = u32) + (func (export "fib") (param $arg i32) (result i32) + (call $make_int + (call $fib + (call $get_int (local.get $arg)) + ) + ) + ) +) diff --git a/tests/functional/flakes/build-time-flake-inputs.sh b/tests/functional/flakes/build-time-flake-inputs.sh new file mode 100644 index 000000000000..d1fc1c453603 --- /dev/null +++ b/tests/functional/flakes/build-time-flake-inputs.sh @@ -0,0 +1,88 @@ +#!/usr/bin/env bash + +source ./common.sh + +TODO_NixOS +enableFeatures "build-time-fetch-tree" +restartDaemon +requireGit + +lazy="$TEST_ROOT/lazy" +createGitRepo "$lazy" +echo world > "$lazy/who" +git -C "$lazy" add who +git -C "$lazy" commit -a -m foo + +repo="$TEST_ROOT/repo" + +createGitRepo "$repo" + +cat > "$repo/flake.nix" < "$lazy/who" +git -C "$lazy" commit -a -m foo + +nix flake update --flake "$repo" + +clearStore + +nix build --out-link "$TEST_ROOT/result" -L "$repo" +[[ $(cat "$TEST_ROOT/result") = utrecht ]] + +rm -rf "$lazy" + +clearStore + +expectStderr 1 nix build --out-link "$TEST_ROOT/result" -L "$repo" | grepQuiet "Cannot build.*source.drv" + +# `nix flake prefetch-inputs` should ignore build-time inputs. +depDir=$TEST_ROOT/dep +createGitRepo "$depDir" +createSimpleGitFlake "$depDir" + +cat > "$repo/flake.nix" < "$flakeDir"/flake.nix < "$flakeDir"/flake.nix <&1 && fail "nix flake check --all-systems should have failed" || true) -echo "$checkRes" | grepQuiet "error: overlay is not a function, but a set instead" +echo "$checkRes" | grepQuiet "error: Overlay is not a function." 
cat > "$flakeDir"/flake.nix <&1 && fail "nix flake check --all-systems should have failed" || true) -echo "$checkRes" | grepQuiet "unknown-attr" +echo "$checkRes" | grepQuiet "Evaluation check.*apps.system-1.default.isValidApp.*failed" cat > "$flakeDir"/flake.nix < "$flakeDir"/flake.nix < "$flakeDir"/flake.nix < "$flakeDir/flake.nix" <"$TEST_HOME/flake.nix" +{ + inputs.nixpkgs.url = "$TEST_HOME/nixpkgs"; + outputs = {self, nixpkgs}: { + devShells.$system.default = (import ./config.nix).mkDerivation { + name = "hello"; + buildCommand = "set -x; mkdir \$out"; + x = "foo"; + }; + devShell.$system = (import ./config.nix).mkDerivation { + name = "hello"; + buildCommand = "set -x; mkdir \$out"; + x = "bar"; + }; + packages.$system.default = (import ./config.nix).mkDerivation { + name = "hello"; + buildCommand = "set -x; mkdir \$out"; + x = "xyzzy"; + }; + }; +} +EOF + +[[ $(nix develop . -L --command sh -c "echo \$x") == "foo" ]] +[[ $(nix develop ".#devShell.$system" -L --command sh -c "echo \$x") == "bar" ]] +sed -i "$TEST_HOME/flake.nix" -e 's/devShells/devShells2/' # remove devShells +[[ $(nix develop . -L --command sh -c "echo \$x") == "bar" ]] +sed -i "$TEST_HOME/flake.nix" -e 's/devShell/devShell2/' # remove devShell +[[ $(nix develop . -L --command sh -c "echo \$x") == "xyzzy" ]] + +# Check that legacyPackages is used, but only when specifying an explicit package. +cat <"$TEST_HOME/flake.nix" +{ + inputs.nixpkgs.url = "$TEST_HOME/nixpkgs"; + outputs = {self, nixpkgs}: { + legacyPackages.$system.default = (import ./config.nix).mkDerivation { + name = "hello"; + buildCommand = "set -x; mkdir \$out"; + x = "foo"; + }; + }; +} +EOF + +(! nix develop . 
-L --command sh -c "echo \$x") +[[ $(nix develop .#default -L --command sh -c "echo \$x") == "foo" ]] diff --git a/tests/functional/flakes/dubious-query.sh b/tests/functional/flakes/dubious-query.sh index c147034c3dd9..114def179a8c 100644 --- a/tests/functional/flakes/dubious-query.sh +++ b/tests/functional/flakes/dubious-query.sh @@ -17,7 +17,7 @@ expectStderr 0 nix --offline build --dry-run "git+file://$repoDir?bar#foo" \ # Check that the anchor (#) is taken as a whole, not split, and throws an error. expectStderr 1 nix --offline build --dry-run "git+file://$repoDir#foo?bar" \ - | grepQuiet "error: flake 'git+file://$repoDir' does not provide attribute 'packages.$system.foo?bar', 'legacyPackages.$system.foo?bar' or 'foo?bar'" + | grepQuiet "error: flake 'git+file://$repoDir' does not provide attribute 'packages.$system.foo?bar', 'defaultPackage.$system.foo?bar', 'legacyPackages.$system.foo?bar' or 'foo?bar'" # Check that a literal `?` in the query doesn't print dubious query warning. expectStderr 0 nix --offline build --dry-run "git+file://$repoDir?#foo" \ diff --git a/tests/functional/flakes/eval-cache.sh b/tests/functional/flakes/eval-cache.sh index 75a2c8cacbf9..f4e48d8bb77a 100755 --- a/tests/functional/flakes/eval-cache.sh +++ b/tests/functional/flakes/eval-cache.sh @@ -35,11 +35,11 @@ EOF git -C "$flake1Dir" add flake.nix git -C "$flake1Dir" commit -m "Init" -expect 1 nix build "$flake1Dir#foo.bar" 2>&1 | grepQuiet 'error: breaks' -expect 1 nix build "$flake1Dir#foo.bar" 2>&1 | grepQuiet 'error: breaks' +expect 1 nix build --no-link "$flake1Dir#foo.bar" 2>&1 | grepQuiet 'error: breaks' +expect 1 nix build --no-link "$flake1Dir#foo.bar" 2>&1 | grepQuiet 'error: breaks' # Stack overflow error must not be cached -expect 1 nix build --max-call-depth 50 "$flake1Dir#stack-depth" 2>&1 \ +expect 1 nix build --no-link --max-call-depth 50 "$flake1Dir#stack-depth" 2>&1 \ | grepQuiet 'error: stack overflow; max-call-depth exceeded' # If the SO is cached, the following 
invocation will produce a cached failure; we expect it to succeed nix build --no-link "$flake1Dir#stack-depth" @@ -48,3 +48,11 @@ nix build --no-link "$flake1Dir#stack-depth" expect 1 nix build "$flake1Dir#ifd" --option allow-import-from-derivation false 2>&1 \ | grepQuiet 'error: cannot build .* during evaluation because the option '\''allow-import-from-derivation'\'' is disabled' nix build --no-link "$flake1Dir#ifd" + +# Test that a store derivation is recreated when it has been deleted +# but the corresponding attribute is still cached. +if ! isTestOnNixOS; then + nix build --no-link "$flake1Dir#drv" + clearStore + nix build --no-link "$flake1Dir#drv" +fi diff --git a/tests/functional/flakes/flakes.sh b/tests/functional/flakes/flakes.sh index 8d95f61240bd..e3c7cade70ba 100755 --- a/tests/functional/flakes/flakes.sh +++ b/tests/functional/flakes/flakes.sh @@ -69,7 +69,9 @@ nix flake metadata "$flake1Dir" | grepQuiet 'URL:.*flake1.*' # Test 'nix flake metadata --json'. json=$(nix flake metadata flake1 --json | jq .) 
[[ $(echo "$json" | jq -r .description) = 'Bla bla' ]] -[[ -d $(echo "$json" | jq -r .path) ]] +if [[ $(nix config show lazy-trees) = false ]]; then + [[ -d $(echo "$json" | jq -r .path) ]] +fi [[ $(echo "$json" | jq -r .lastModified) = $(git -C "$flake1Dir" log -n1 --format=%ct) ]] hash1=$(echo "$json" | jq -r .revision) [[ -n $(echo "$json" | jq -r .fingerprint) ]] @@ -77,6 +79,7 @@ hash1=$(echo "$json" | jq -r .revision) echo foo > "$flake1Dir/foo" git -C "$flake1Dir" add "$flake1Dir"/foo [[ $(nix flake metadata flake1 --json --refresh | jq -r .dirtyRevision) == "$hash1-dirty" ]] +[[ $(_NIX_TEST_FAIL_ON_LARGE_PATH=1 nix flake metadata flake1 --json --refresh --warn-large-path-threshold 1 --lazy-trees | jq -r .dirtyRevision) == "$hash1-dirty" ]] [[ "$(nix flake metadata flake1 --json | jq -r .fingerprint)" != null ]] echo -n '# foo' >> "$flake1Dir/flake.nix" @@ -110,6 +113,11 @@ nix build -o "$TEST_ROOT/result" "git+file://$flake1Dir#default" nix build -o "$TEST_ROOT/result" "$flake1Dir?ref=HEAD#default" nix build -o "$TEST_ROOT/result" "git+file://$flake1Dir?ref=HEAD#default" +# Check that the fetcher cache works. +if [[ $(nix config show lazy-trees) = false ]]; then + nix build -o "$TEST_ROOT/result" "git+file://$flake1Dir?ref=HEAD#default" -vvvvv 2>&1 | grepQuiet "source path.*cache hit" +fi + # Check that relative paths are allowed for git flakes. # This may change in the future once git submodule support is refined. # See: https://discourse.nixos.org/t/57783 and #9708. @@ -161,7 +169,12 @@ expect 1 nix build -o "$TEST_ROOT/result" "$flake2Dir#bar" --no-update-lock-file nix build -o "$TEST_ROOT/result" "$flake2Dir#bar" --commit-lock-file [[ -e "$flake2Dir/flake.lock" ]] [[ -z $(git -C "$flake2Dir" diff main || echo failed) ]] -[[ $(jq --indent 0 --compact-output . 
< "$flake2Dir/flake.lock") =~ ^'{"nodes":{"flake1":{"locked":{"lastModified":'.*',"narHash":"sha256-'.*'","ref":"refs/heads/master","rev":"'.*'","revCount":2,"type":"git","url":"file:///'.*'"},"original":{"id":"flake1","type":"indirect"}},"root":{"inputs":{"flake1":"flake1"}}},"root":"root","version":7}'$ ]] +[[ $(jq --indent 0 --compact-output . < "$flake2Dir/flake.lock") =~ ^'{"nodes":{"flake1":{"locked":{"lastModified":'[0-9]*',"narHash":"sha256-'.*'","ref":"refs/heads/master","rev":"'.*'","revCount":2,"type":"git","url":"file:///'.*'"},"original":{"id":"flake1","type":"indirect"}},"root":{"inputs":{"flake1":"flake1"}}},"root":"root","version":7}'$ ]] +if [[ $(nix config show lazy-trees) = true ]]; then + # Test that `lazy-locks` causes NAR hashes to be omitted from the lock file. + nix flake update --flake "$flake2Dir" --commit-lock-file --lazy-locks + [[ $(jq --indent 0 --compact-output . < "$flake2Dir/flake.lock") =~ ^'{"nodes":{"flake1":{"locked":{"lastModified":'[0-9]*',"ref":"refs/heads/master","rev":"'.*'","revCount":2,"type":"git","url":"file:///'.*'"},"original":{"id":"flake1","type":"indirect"}},"root":{"inputs":{"flake1":"flake1"}}},"root":"root","version":7}'$ ]] +fi # Rerunning the build should not change the lockfile. nix build -o "$TEST_ROOT/result" "$flake2Dir#bar" @@ -270,6 +283,9 @@ nix registry remove flake1 [[ $(nix registry list | wc -l) == 4 ]] [[ $(nix registry resolve flake1) = "git+file://$flake1Dir" ]] +# Test the builtin fallback registry. +[[ $(nix registry resolve nixpkgs --flake-registry http://fail.invalid.org/sdklsdklsd --download-attempts 1) = github:NixOS/nixpkgs/nixpkgs-unstable ]] + # Test 'nix registry list' with a disabled global registry. 
nix registry add user-flake1 git+file://"$flake1Dir" nix registry add user-flake2 "git+file://$percentEncodedFlake2Dir" @@ -435,7 +451,7 @@ nix flake metadata "$flake3Dir" --json --eval-store "dummy://?read-only=false" | rm -rf "$badFlakeDir" mkdir "$badFlakeDir" echo INVALID > "$badFlakeDir"/flake.nix -nix store delete "$(nix store add-path "$badFlakeDir")" +nix store delete --ignore-liveness "$(nix store add-path "$badFlakeDir")" [[ $(nix path-info "$(nix store add-path "$flake1Dir")") =~ flake1 ]] [[ $(nix path-info path:"$(nix store add-path "$flake1Dir")") =~ simple ]] diff --git a/tests/functional/flakes/follow-paths.sh b/tests/functional/flakes/follow-paths.sh index f658a0847f7a..3d668d687ab2 100755 --- a/tests/functional/flakes/follow-paths.sh +++ b/tests/functional/flakes/follow-paths.sh @@ -131,7 +131,7 @@ EOF git -C "$flakeFollowsA" add flake.nix expect 1 nix flake lock "$flakeFollowsA" 2>&1 | grep '/flakeB.*is forbidden in pure evaluation mode' -expect 1 nix flake lock --impure "$flakeFollowsA" 2>&1 | grep "'flakeB' is too short to be a valid store path" +expect 1 nix flake lock --impure "$flakeFollowsA" 2>&1 | grep "error: 'flakeB' is too short to be a valid store path" # Test relative non-flake inputs. cat > "$flakeFollowsA"/flake.nix < "$flake1Dir/subflake/flake.nix" < "$flake1Dir/flake.nix" < "$flakeDir/a" -(cd "$flakeDir" && nix flake init) # check idempotence +(cd "$flakeDir" && nix flake init --template "git+file://$templatesDir") # check idempotence # Test 'nix flake init' with conflicts createGitRepo "$flakeDir" echo b > "$flakeDir/a" pushd "$flakeDir" -(! nix flake init) |& grep "refusing to overwrite existing file \"$flakeDir/a\"" +(! 
nix flake init --template "git+file://$templatesDir") |& grep "refusing to overwrite existing file \"$flakeDir/a\"" popd git -C "$flakeDir" commit -a -m 'Changed' diff --git a/tests/functional/flakes/meson.build b/tests/functional/flakes/meson.build index de76a55804a8..3658b66aceaa 100644 --- a/tests/functional/flakes/meson.build +++ b/tests/functional/flakes/meson.build @@ -34,6 +34,12 @@ suites += { 'source-paths.sh', 'old-lockfiles.sh', 'trace-ifd.sh', + 'build-time-flake-inputs.sh', + 'substitution.sh', + 'shallow.sh', + 'get-flake.sh', + 'provenance.sh', + 'search.sh', ], 'workdir' : meson.current_source_dir(), } diff --git a/tests/functional/flakes/provenance.sh b/tests/functional/flakes/provenance.sh new file mode 100644 index 000000000000..2004ccd5e693 --- /dev/null +++ b/tests/functional/flakes/provenance.sh @@ -0,0 +1,372 @@ +#!/usr/bin/env bash + +experimental_features="provenance" + +source common.sh + +TODO_NixOS + +createFlake1 + +outPath=$(nix build --print-out-paths --no-link "$flake1Dir#packages.$system.default") +drvPath=$(nix eval --raw "$flake1Dir#packages.$system.default.drvPath") +rev=$(nix flake metadata --json "$flake1Dir" | jq -r .locked.rev) +lastModified=$(nix flake metadata --json "$flake1Dir" | jq -r .locked.lastModified) +treePath=$(nix flake prefetch --json "$flake1Dir" | jq -r .storePath) +builder=$(nix eval --raw "$flake1Dir#packages.$system.default._builder") + +# Building a derivation should have tree+subpath+flake+meta+build provenance. 
+[[ "$(nix path-info --json --json-format 1 "$outPath" | jq ".\"$outPath\".provenance")" == "$(cat < "$flake1Dir/somefile" +git -C "$flake1Dir" add somefile +nix build --impure --print-out-paths --no-link "$flake1Dir#packages.$system.default" +[[ $(nix path-info --json --json-format 1 "$builder" | jq ".\"$builder\".provenance") != null ]] + +[[ "$(nix provenance show "$outPath")" = "$(cat < "$TEST_ROOT/counter" +cat > "$flake1Dir/flake.nix" < \$out + echo x >> "$TEST_ROOT/counter" + ''; + }; + }; + }; +} +EOF +outPath=$(nix build --print-out-paths --no-link "$flake1Dir") + +expectStderr 1 nix provenance verify --all | grepQuiet "derivation .* may not be deterministic: output .* differs" + +# Test various types of source files. +clearStore +echo x > "$TEST_ROOT/counter" +cat > "$flake1Dir/flake.nix" < "$TEST_ROOT/hello.txt" + +path="$(nix store prefetch-file --json "file://$TEST_ROOT/hello.txt" | jq -r .storePath)" + +[[ "$(nix provenance show "$path")" = $(cat < "$TEST_ROOT/hello.txt" + +expectStderr 1 nix provenance verify "$path" | grepQuiet "hash mismatch for URL" + +# Test invalid tag names +for name in "123-invalid" "invalid tag" "invalid@tag" "-invalid" " foo"; do + expectStderr 1 nix build --build-provenance-tags "{\"$name\": \"value\"}" --no-link "$flake1Dir#packages.$system.default" 2>&1 | grepQuiet "tag name '$name' is invalid" +done diff --git a/tests/functional/flakes/search.sh b/tests/functional/flakes/search.sh new file mode 100644 index 000000000000..619a4369cbe4 --- /dev/null +++ b/tests/functional/flakes/search.sh @@ -0,0 +1,47 @@ +#!/usr/bin/env bash + +source common.sh + +createFlake1 + +cat > "$flake1Dir/flake.nix" <> "$flake1Dir/flake.nix" +git -C "$flake1Dir" commit -a -m bla + +cat > "$repoDir"/flake.nix < show-output.json nix eval --impure --expr ' let show_output = builtins.fromJSON (builtins.readFile ./show-output.json); in -assert show_output.packages.someOtherSystem.default == {}; -assert 
show_output.packages.${builtins.currentSystem}.default.name == "simple"; -assert show_output.legacyPackages.${builtins.currentSystem} == {}; +assert show_output.inventory.packages.output.children.someOtherSystem.filtered; +assert show_output.inventory.packages.output.children.${builtins.currentSystem}.children.default.derivation.name == "simple"; +assert show_output.inventory.legacyPackages.output.children.${builtins.currentSystem}.isLegacy; true ' @@ -28,8 +28,8 @@ nix flake show --json --all-systems > show-output.json nix eval --impure --expr ' let show_output = builtins.fromJSON (builtins.readFile ./show-output.json); in -assert show_output.packages.someOtherSystem.default.name == "simple"; -assert show_output.legacyPackages.${builtins.currentSystem} == {}; +assert show_output.inventory.packages.output.children.someOtherSystem.children.default.derivation.name == "simple"; +assert show_output.inventory.legacyPackages.output.children.${builtins.currentSystem}.isLegacy; true ' @@ -39,34 +39,7 @@ nix flake show --json --legacy > show-output.json nix eval --impure --expr ' let show_output = builtins.fromJSON (builtins.readFile ./show-output.json); in -assert show_output.legacyPackages.${builtins.currentSystem}.hello.name == "simple"; -true -' - -# Test that attributes are only reported when they have actual content -cat >flake.nix < show-output.json -nix eval --impure --expr ' -let show_output = builtins.fromJSON (builtins.readFile ./show-output.json); -in -assert show_output == { }; +assert show_output.inventory.legacyPackages.output.children.${builtins.currentSystem}.children.hello.derivation.name == "simple"; true ' @@ -87,8 +60,8 @@ nix flake show --json --legacy --all-systems > show-output.json nix eval --impure --expr ' let show_output = builtins.fromJSON (builtins.readFile ./show-output.json); in -assert show_output.legacyPackages.${builtins.currentSystem}.AAAAAASomeThingsFailToEvaluate == { }; -assert 
show_output.legacyPackages.${builtins.currentSystem}.simple.name == "simple"; +assert show_output.inventory.legacyPackages.output.children.${builtins.currentSystem}.children.AAAAAASomeThingsFailToEvaluate.failed; +assert show_output.inventory.legacyPackages.output.children.${builtins.currentSystem}.children.simple.derivation.name == "simple"; true ' @@ -98,35 +71,4 @@ popd writeIfdFlake "$flakeDir" pushd "$flakeDir" - -nix flake show --json > show-output.json -# shellcheck disable=SC2016 -nix eval --impure --expr ' -let show_output = builtins.fromJSON (builtins.readFile ./show-output.json); -in -assert show_output.packages.${builtins.currentSystem}.default == { }; -true -' - - -# Test that nix keeps going even when packages.$SYSTEM contains not derivations -cat >flake.nix < show-output.json -# shellcheck disable=SC2016 -nix eval --impure --expr ' -let show_output = builtins.fromJSON (builtins.readFile ./show-output.json); -in -assert show_output.packages.${builtins.currentSystem}.not-a-derivation == {}; -true -' - +[[ $(nix flake show --json | jq -r ".inventory.packages.output.children.\"$system\".children.default.derivation.name") = top ]] diff --git a/tests/functional/flakes/source-paths.sh b/tests/functional/flakes/source-paths.sh index 3aa3683c27cf..e4f4ec1cda84 100644 --- a/tests/functional/flakes/source-paths.sh +++ b/tests/functional/flakes/source-paths.sh @@ -30,10 +30,10 @@ expectStderr 1 nix eval "$repo#y" | grepQuiet "at $repo/flake.nix:" git -C "$repo" commit -a -m foo -expectStderr 1 nix eval "git+file://$repo?ref=master#y" | grepQuiet "at «git+file://$repo?ref=master&rev=.*»/flake.nix:" +expectStderr 1 nix eval "git+file://$repo?ref=master#y" | grepQuiet "at «git+file://$repo?rev=.*»/flake.nix:" expectStderr 1 nix eval "$repo#z" | grepQuiet "error: Path 'foo' does not exist in Git repository \"$repo\"." 
-expectStderr 1 nix eval "git+file://$repo?ref=master#z" | grepQuiet "error: '«git+file://$repo?ref=master&rev=.*»/foo' does not exist" +expectStderr 1 nix eval "git+file://$repo?ref=master#z" | grepQuiet "error: '«git+file://$repo?rev=.*»/foo' does not exist" expectStderr 1 nix eval "$repo#a" | grepQuiet "error: Path 'foo' does not exist in Git repository \"$repo\"." echo 123 > "$repo/foo" diff --git a/tests/functional/flakes/substitution.sh b/tests/functional/flakes/substitution.sh new file mode 100644 index 000000000000..97a04931abfc --- /dev/null +++ b/tests/functional/flakes/substitution.sh @@ -0,0 +1,31 @@ +#! /usr/bin/env bash + +# Test that inputs are substituted if they cannot be fetched from their original location. + +source ./common.sh + +if [[ $(nix config show lazy-trees) = true ]]; then + exit 0 +fi + +TODO_NixOS + +createFlake1 +createFlake2 + +nix build --no-link "$flake2Dir#bar" + +path1="$(nix flake metadata --json "$flake1Dir" | jq -r .path)" + +# Building after an input disappeared should succeed, because it's still in the Nix store. +mv "$flake1Dir" "$flake1Dir-tmp" +nix build --no-link "$flake2Dir#bar" --no-eval-cache + +# Check that Nix will fall back to fetching the input from a substituter. 
+cache="file://$TEST_ROOT/binary-cache" +nix copy --to "$cache" "$path1" +clearStore +nix build --no-link "$flake2Dir#bar" --no-eval-cache --substitute --substituters "$cache" + +clearStore +expectStderr 1 nix build --no-link "$flake2Dir#bar" --no-eval-cache | grepQuiet "Git repository.*does not exist" diff --git a/tests/functional/flakes/unlocked-override.sh b/tests/functional/flakes/unlocked-override.sh index ed05440de03b..ed4d131b7ad0 100755 --- a/tests/functional/flakes/unlocked-override.sh +++ b/tests/functional/flakes/unlocked-override.sh @@ -36,6 +36,7 @@ expectStderr 1 nix flake lock "$flake2Dir" --override-input flake1 "$TEST_ROOT/f grepQuiet "Not writing lock file.*because it has an unlocked input" nix flake lock "$flake2Dir" --override-input flake1 "$TEST_ROOT/flake1" --allow-dirty-locks +_NIX_TEST_FAIL_ON_LARGE_PATH=1 nix flake lock "$flake2Dir" --override-input flake1 "$TEST_ROOT/flake1" --allow-dirty-locks --warn-large-path-threshold 1 --lazy-trees # Using a lock file with a dirty lock does not require --allow-dirty-locks, but should print a warning. expectStderr 0 nix eval "$flake2Dir#x" | diff --git a/tests/functional/formatter.sh b/tests/functional/formatter.sh index 03b31708d670..7071055d153e 100755 --- a/tests/functional/formatter.sh +++ b/tests/functional/formatter.sh @@ -85,4 +85,6 @@ rm ./my-result # Flake outputs check. nix flake check -nix flake show | grep -P "package 'formatter'" + +clearStore +expectStderr 0 nix flake show | grepQuiet ": formatter" diff --git a/tests/functional/gc-runtime.nix b/tests/functional/gc-runtime.nix index 10dc5e29f56d..05ce67562fe1 100644 --- a/tests/functional/gc-runtime.nix +++ b/tests/functional/gc-runtime.nix @@ -20,6 +20,7 @@ with import ./config.nix; cat > $out/program << 'EOF' #! 
${shell} + echo x > "$2" sleep 10000 < "$1" EOF diff --git a/tests/functional/gc-runtime.sh b/tests/functional/gc-runtime.sh index ac788406dea1..9b675cb97329 100755 --- a/tests/functional/gc-runtime.sh +++ b/tests/functional/gc-runtime.sh @@ -9,18 +9,25 @@ case $system in skipTest "Not running Linux"; esac +TODO_NixOS + set -m # enable job control, needed for kill programPath=$(nix-build --no-link ./gc-runtime.nix -A program) environPath=$(nix-build --no-link ./gc-runtime.nix -A environ) openPath=$(nix-build --no-link ./gc-runtime.nix -A open) +fifo="$TEST_ROOT/fifo" +mkfifo "$fifo" + echo "backgrounding program..." export environPath -"$programPath"/program "$openPath"/open & -sleep 2 # hack - wait for the program to get started +"$programPath"/program "$openPath"/open "$fifo" & child=$! echo PID=$child +cat "$fifo" + +expectStderr 1 nix-store --delete "$openPath" | grepQuiet "Cannot delete path.*because it's referenced by the GC root '/proc/" nix-store --gc diff --git a/tests/functional/gc.sh b/tests/functional/gc.sh index c58f47021f84..3ade6e4f582c 100755 --- a/tests/functional/gc.sh +++ b/tests/functional/gc.sh @@ -13,7 +13,7 @@ outPath=$(nix-store -rvv "$drvPath") rm -f "$NIX_STATE_DIR/gcroots/foo" ln -sf "$outPath" "$NIX_STATE_DIR/gcroots/foo" -[ "$(nix-store -q --roots "$outPath")" = "$NIX_STATE_DIR/gcroots/foo -> $outPath" ] +expectStderr 0 nix-store -q --roots "$outPath" | grepQuiet "$NIX_STATE_DIR/gcroots/foo -> $outPath" nix-store --gc --print-roots | grep "$outPath" nix-store --gc --print-live | grep "$outPath" @@ -23,10 +23,10 @@ if nix-store --gc --print-dead | grep -E "$outPath"$; then false; fi nix-store --gc --print-dead inUse=$(readLink "$outPath/reference-to-input-2") -if nix-store --delete "$inUse"; then false; fi +expectStderr 1 nix-store --delete "$inUse" | grepQuiet "Cannot delete path.*because it's referenced by path '" test -e "$inUse" -if nix-store --delete "$outPath"; then false; fi +expectStderr 1 nix-store --delete "$outPath" | 
grepQuiet "Cannot delete path.*because it's referenced by the GC root " test -e "$outPath" for i in "$NIX_STORE_DIR"/*; do diff --git a/tests/functional/impure-derivations.sh b/tests/functional/impure-derivations.sh index 6fc25bf90608..2c36236044d0 100755 --- a/tests/functional/impure-derivations.sh +++ b/tests/functional/impure-derivations.sh @@ -21,7 +21,7 @@ drvPath2=$(nix derivation add < "$TEST_HOME"/impure-drv.json) [[ "$drvPath" = "$drvPath2" ]] # But only with the experimental feature! -expectStderr 1 nix derivation add < "$TEST_HOME"/impure-drv.json --experimental-features nix-command | grepQuiet "experimental Nix feature 'impure-derivations' is disabled" +expectStderr 1 nix derivation add < "$TEST_HOME"/impure-drv.json --experimental-features '' | grepQuiet "experimental Nix feature 'impure-derivations' is disabled" nix build --dry-run --json --file ./impure-derivations.nix impure.all json=$(nix build -L --no-link --json --file ./impure-derivations.nix impure.all) diff --git a/tests/functional/lang/eval-fail-blackhole.err.exp b/tests/functional/lang/eval-fail-blackhole.err.exp index 95e33a5fe456..6866c58dd414 100644 --- a/tests/functional/lang/eval-fail-blackhole.err.exp +++ b/tests/functional/lang/eval-fail-blackhole.err.exp @@ -7,8 +7,8 @@ error: 3| x = y; error: infinite recursion encountered - at /pwd/lang/eval-fail-blackhole.nix:3:7: + at /pwd/lang/eval-fail-blackhole.nix:2:10: + 1| let { 2| body = x; + | ^ 3| x = y; - | ^ - 4| y = x; diff --git a/tests/functional/lang/eval-fail-deepseq-stack-overflow.err.exp b/tests/functional/lang/eval-fail-deepseq-stack-overflow.err.exp index 4cc43ca095e1..f142b5c4d45d 100644 --- a/tests/functional/lang/eval-fail-deepseq-stack-overflow.err.exp +++ b/tests/functional/lang/eval-fail-deepseq-stack-overflow.err.exp @@ -23,8 +23,3 @@ error: 7| in error: stack overflow; max-call-depth exceeded - at /pwd/lang/eval-fail-deepseq-stack-overflow.nix:5:28: - 4| let - 5| long = builtins.genList (x: x) 100000; - | ^ - 6| 
reverseLinkedList = builtins.foldl' (tail: head: { inherit head tail; }) null long; diff --git a/tests/functional/lang/eval-fail-derivation-name.err.exp b/tests/functional/lang/eval-fail-derivation-name.err.exp index ba5ff2d002ab..dad9ff6a9f69 100644 --- a/tests/functional/lang/eval-fail-derivation-name.err.exp +++ b/tests/functional/lang/eval-fail-derivation-name.err.exp @@ -16,7 +16,7 @@ error: … while calling the 'derivationStrict' builtin at «nix-internal»/derivation-internal.nix::: | - | strict = derivationStrict drvAttrs; + | strict = drvFunc drvAttrs; | ^ | diff --git a/tests/functional/lang/eval-fail-derivation-structuredAttrs-stack-overflow.err.exp b/tests/functional/lang/eval-fail-derivation-structuredAttrs-stack-overflow.err.exp index c61eab0aa422..1a8dfa681f26 100644 --- a/tests/functional/lang/eval-fail-derivation-structuredAttrs-stack-overflow.err.exp +++ b/tests/functional/lang/eval-fail-derivation-structuredAttrs-stack-overflow.err.exp @@ -1,24 +1,24 @@ error: … while evaluating the attribute 'outPath' - at «nix-internal»/derivation-internal.nix:50:7: - 49| value = commonAttrs // { - 50| outPath = builtins.getAttr outputName strict; + at «nix-internal»/derivation-internal.nix:51:7: + 50| value = commonAttrs // { + 51| outPath = builtins.getAttr outputName strict; | ^ - 51| drvPath = strict.drvPath; + 52| drvPath = strict.drvPath; … while calling the 'getAttr' builtin - at «nix-internal»/derivation-internal.nix:50:17: - 49| value = commonAttrs // { - 50| outPath = builtins.getAttr outputName strict; + at «nix-internal»/derivation-internal.nix:51:17: + 50| value = commonAttrs // { + 51| outPath = builtins.getAttr outputName strict; | ^ - 51| drvPath = strict.drvPath; + 52| drvPath = strict.drvPath; … while calling the 'derivationStrict' builtin - at «nix-internal»/derivation-internal.nix:37:12: - 36| - 37| strict = derivationStrict drvAttrs; + at «nix-internal»/derivation-internal.nix:38:12: + 37| + 38| strict = drvFunc drvAttrs; | ^ - 38| + 39| … 
while evaluating derivation 'test' whose name attribute is located at /pwd/lang/eval-fail-derivation-structuredAttrs-stack-overflow.nix:5:3 diff --git a/tests/functional/lang/eval-fail-memoised-error-trace-not-mutated.err.exp b/tests/functional/lang/eval-fail-memoised-error-trace-not-mutated.err.exp index f8807b6bd9a5..9327371bf228 100644 --- a/tests/functional/lang/eval-fail-memoised-error-trace-not-mutated.err.exp +++ b/tests/functional/lang/eval-fail-memoised-error-trace-not-mutated.err.exp @@ -15,6 +15,8 @@ error: … forcing d + … forcing c + … forcing b … while calling the 'throw' builtin diff --git a/tests/functional/lang/eval-fail-recursion.err.exp b/tests/functional/lang/eval-fail-recursion.err.exp index ee41ff46bea9..21bf7a695bdd 100644 --- a/tests/functional/lang/eval-fail-recursion.err.exp +++ b/tests/functional/lang/eval-fail-recursion.err.exp @@ -1,14 +1,14 @@ error: … in the right operand of the update (//) operator - at /pwd/lang/eval-fail-recursion.nix:2:14: + at /pwd/lang/eval-fail-recursion.nix:2:11: 1| let 2| a = { } // a; - | ^ + | ^ 3| in error: infinite recursion encountered - at /pwd/lang/eval-fail-recursion.nix:2:14: - 1| let - 2| a = { } // a; - | ^ + at /pwd/lang/eval-fail-recursion.nix:4:1: 3| in + 4| a.foo + | ^ + 5| diff --git a/tests/functional/lang/eval-fail-scope-5.err.exp b/tests/functional/lang/eval-fail-scope-5.err.exp index 6edc85f4f161..20027a7f1f57 100644 --- a/tests/functional/lang/eval-fail-scope-5.err.exp +++ b/tests/functional/lang/eval-fail-scope-5.err.exp @@ -21,8 +21,8 @@ error: 8| x ? y, error: infinite recursion encountered - at /pwd/lang/eval-fail-scope-5.nix:8:11: - 7| { - 8| x ? y, - | ^ - 9| y ? 
x, + at /pwd/lang/eval-fail-scope-5.nix:11:5: + 10| }: + 11| x + y; + | ^ + 12| diff --git a/tests/functional/lang/eval-okay-filterattrs-names.exp b/tests/functional/lang/eval-okay-filterattrs-names.exp new file mode 100644 index 000000000000..3f07d6e1a028 --- /dev/null +++ b/tests/functional/lang/eval-okay-filterattrs-names.exp @@ -0,0 +1 @@ +{ a = 3; } diff --git a/tests/functional/lang/eval-okay-filterattrs-names.nix b/tests/functional/lang/eval-okay-filterattrs-names.nix new file mode 100644 index 000000000000..94108fbefdaf --- /dev/null +++ b/tests/functional/lang/eval-okay-filterattrs-names.nix @@ -0,0 +1,5 @@ +builtins.filterAttrs (name: value: name == "a") { + a = 3; + b = 6; + c = 10; +} diff --git a/tests/functional/lang/eval-okay-filterattrs.exp b/tests/functional/lang/eval-okay-filterattrs.exp new file mode 100644 index 000000000000..74b9825e9c42 --- /dev/null +++ b/tests/functional/lang/eval-okay-filterattrs.exp @@ -0,0 +1 @@ +{ b = 6; c = 10; } diff --git a/tests/functional/lang/eval-okay-filterattrs.nix b/tests/functional/lang/eval-okay-filterattrs.nix new file mode 100644 index 000000000000..28d37bbe7843 --- /dev/null +++ b/tests/functional/lang/eval-okay-filterattrs.nix @@ -0,0 +1,5 @@ +builtins.filterAttrs (name: value: value > 5) { + a = 3; + b = 6; + c = 10; +} diff --git a/tests/functional/linux-sandbox.sh b/tests/functional/linux-sandbox.sh index c3ddf6ce65f7..51164bc47179 100755 --- a/tests/functional/linux-sandbox.sh +++ b/tests/functional/linux-sandbox.sh @@ -96,3 +96,30 @@ nix-sandbox-build symlink-derivation.nix -A test_sandbox_paths \ --option extra-sandbox-paths "/dir=$TEST_ROOT" \ --option extra-sandbox-paths "/symlinkDir=$symlinkDir" \ --option extra-sandbox-paths "/symlink=$symlinkcert" + +# Nonexistent sandbox paths should error early in the build process +# shellcheck disable=SC2016 +expectStderr 1 nix-sandbox-build --option extra-sandbox-paths '/does-not-exist' \ + -E 'with import '"${config_nix}"'; mkDerivation { name = 
"trivial"; buildCommand = "echo > $out"; }' | + grepQuiet "path \"/does-not-exist\" is configured as part of the \`sandbox-paths\` option, but is inaccessible" + +# Test pre-build-hook. +DEST="$TEST_ROOT/hook-output" +HOOK="$TEST_ROOT/pre-build-hook" + +echo foo > "$TEST_ROOT"/fnord + +cat > "$HOOK" < "$DEST" +echo "hello from hook!" >&2 +echo "extra-sandbox-paths" +echo "/foo/bar=$TEST_ROOT/fnord" +EOF +chmod +x "$HOOK" + +outPath=$(nix-build --no-out-link --sandbox-paths /nix/store --pre-build-hook "$HOOK" symlink-derivation.nix -A test_sandbox_paths_2) + +[[ $(cat "$TEST_ROOT/store0/nix/store/$(basename "$outPath")/xyzzy") = foo ]] + +[[ "$(cat "$DEST")" == "test-sandbox-paths-2" ]] diff --git a/tests/functional/local-overlay-store/delete-refs-inner.sh b/tests/functional/local-overlay-store/delete-refs-inner.sh index f54ef2bb6b49..708e8c5a8dfd 100644 --- a/tests/functional/local-overlay-store/delete-refs-inner.sh +++ b/tests/functional/local-overlay-store/delete-refs-inner.sh @@ -23,14 +23,14 @@ input2=$(nix-build ../hermetic.nix --no-out-link --arg busybox "$busybox" --arg input3=$(nix-build ../hermetic.nix --no-out-link --arg busybox "$busybox" --arg withFinalRefs true --arg seed 2 -A passthru.input3 -j0) # Can't delete because referenced -expectStderr 1 nix-store --delete "$input1" | grepQuiet "Cannot delete path" -expectStderr 1 nix-store --delete "$input2" | grepQuiet "Cannot delete path" -expectStderr 1 nix-store --delete "$input3" | grepQuiet "Cannot delete path" +expectStderr 1 nix-store --delete "$input1" | grepQuiet "Cannot delete path.*because it's referenced by path" +expectStderr 1 nix-store --delete "$input2" | grepQuiet "Cannot delete path.*because it's referenced by path" +expectStderr 1 nix-store --delete "$input3" | grepQuiet "Cannot delete path.*because it's referenced by path" # These same paths are referenced in the lower layer (by the seed 1 # build done in `initLowerStore`). 
-expectStderr 1 nix-store --store "$storeA" --delete "$input2" | grepQuiet "Cannot delete path" -expectStderr 1 nix-store --store "$storeA" --delete "$input3" | grepQuiet "Cannot delete path" +expectStderr 1 nix-store --store "$storeA" --delete "$input2" | grepQuiet "Cannot delete path.*because it's referenced by path" +expectStderr 1 nix-store --store "$storeA" --delete "$input3" | grepQuiet "Cannot delete path.*because it's referenced by path" # Can delete nix-store --delete "$hermetic" diff --git a/tests/functional/logging.sh b/tests/functional/logging.sh index 600fce43e940..ffb1e6d9621e 100755 --- a/tests/functional/logging.sh +++ b/tests/functional/logging.sh @@ -40,5 +40,6 @@ if [[ "$NIX_REMOTE" != "daemon" ]]; then nix build -vv --file dependencies.nix --no-link --json-log-path "$TEST_ROOT/log.json" 2>&1 | grepQuiet 'building.*dependencies-top.drv' jq < "$TEST_ROOT/log.json" grep '{"action":"start","fields":\[".*-dependencies-top.drv","",1,1\],"id":.*,"level":3,"parent":0' "$TEST_ROOT/log.json" >&2 + grep -E '{"action":"result","id":[^,]+,"payload":{"builtOutputs":{"out":{"dependentRealisations":\{\},"id":"[^"]+","outPath":"[^-]+-dependencies-top".*"status":"Built".*"success":true' "$TEST_ROOT/log.json" >&2 (( $(grep -c '{"action":"msg","level":5,"msg":"executing builder .*"}' "$TEST_ROOT/log.json" ) == 5 )) fi diff --git a/tests/functional/meson.build b/tests/functional/meson.build index bc4d2643e265..d3d4232a388a 100644 --- a/tests/functional/meson.build +++ b/tests/functional/meson.build @@ -178,6 +178,8 @@ suites = [ 'help.sh', 'symlinks.sh', 'external-builders.sh', + 'wasm.sh', + 'sentry.sh', ], 'workdir' : meson.current_source_dir(), }, diff --git a/tests/functional/misc.sh b/tests/functional/misc.sh index 131b63323e57..b8bbb74dddd7 100755 --- a/tests/functional/misc.sh +++ b/tests/functional/misc.sh @@ -23,11 +23,11 @@ expect 1 nix-env -q --foo 2>&1 | grep "unknown flag" # Eval Errors. 
eval_arg_res=$(nix-instantiate --eval -E 'let a = {} // a; in a.foo' 2>&1 || true) -echo "$eval_arg_res" | grep "at «string»:1:15:" +echo "$eval_arg_res" | grep "at «string»:1:12:" echo "$eval_arg_res" | grep "infinite recursion encountered" eval_stdin_res=$(echo 'let a = {} // a; in a.foo' | nix-instantiate --eval -E - 2>&1 || true) -echo "$eval_stdin_res" | grep "at «stdin»:1:15:" +echo "$eval_stdin_res" | grep "at «stdin»:1:12:" echo "$eval_stdin_res" | grep "infinite recursion encountered" # Attribute path errors diff --git a/tests/functional/nix-copy-ssh-ng.sh b/tests/functional/nix-copy-ssh-ng.sh index f74f3bb86c34..1af6c4fb069d 100755 --- a/tests/functional/nix-copy-ssh-ng.sh +++ b/tests/functional/nix-copy-ssh-ng.sh @@ -15,4 +15,8 @@ nix store info --store "$remoteStore" # Regression test for https://github.com/NixOS/nix/issues/6253 nix copy --to "$remoteStore" "$outPath" --no-check-sigs & -nix copy --to "$remoteStore" "$outPath" --no-check-sigs +pid1="$!" +nix copy --to "$remoteStore" "$outPath" --no-check-sigs & +pid2="$!" +wait "$pid1" +wait "$pid2" diff --git a/tests/functional/nix-profile.sh b/tests/functional/nix-profile.sh index 8d7e05fb3556..6ee78c5dc5eb 100755 --- a/tests/functional/nix-profile.sh +++ b/tests/functional/nix-profile.sh @@ -4,6 +4,8 @@ source common.sh TODO_NixOS +requireGit + clearStore clearProfiles @@ -12,7 +14,7 @@ restartDaemon # Make a flake. flake1Dir=$TEST_ROOT/flake1 -mkdir -p "$flake1Dir" +createGitRepo "$flake1Dir" # shellcheck disable=SC2154,SC1039 cat > "$flake1Dir"/flake.nix < "$flake1Dir"/ca.nix cp "${config_nix}" "$flake1Dir"/ +git -C "$flake1Dir" add flake.nix config.nix who version ca.nix +git -C "$flake1Dir" commit -m 'Initial' + # Test upgrading from nix-env. 
nix-env -f ./user-envs.nix -i foo-1.0 nix profile list | grep -A2 'Name:.*foo' | grep 'Store paths:.*foo-1.0' nix profile add "$flake1Dir" -L -nix profile list | grep -A4 'Name:.*flake1' | grep 'Locked flake URL:.*narHash' +#nix profile list | grep -A4 'Name:.*flake1' | grep 'Locked flake URL:.*narHash' [[ $("$TEST_HOME"/.nix-profile/bin/hello) = "Hello World" ]] [ -e "$TEST_HOME"/.nix-profile/share/man ] # shellcheck disable=SC2235 (! [ -e "$TEST_HOME"/.nix-profile/include ]) nix profile history -nix profile history | grep "packages.$system.default: ∅ -> 1.0" -nix profile diff-closures | grep 'env-manifest.nix: ε → ∅' +nix profile history | grep "packages.$system.default: 1.0, 1.0-man added" +nix profile diff-closures | grep 'env-manifest.nix: (no version) removed' # Test XDG Base Directories support export NIX_CONFIG="use-xdg-base-directories = true" @@ -73,6 +78,25 @@ unset NIX_CONFIG # Test conflicting package add. nix profile add "$flake1Dir" 2>&1 | grep "warning: 'flake1' is already added" +# Test tab completion of profile elements +# The profile should have 'foo' and 'flake1' installed at this point +completion_output=$(NIX_GET_COMPLETIONS=3 nix profile remove '' 2>&1) +echo "$completion_output" | grep -q "^normal$" +echo "$completion_output" | grep -q "^flake1" +echo "$completion_output" | grep -q "^foo" + +# Test prefix matching - should only complete 'flake1' when prefix is 'fl' +completion_output=$(NIX_GET_COMPLETIONS=3 nix profile remove 'fl' 2>&1) +echo "$completion_output" | grep -q "^normal$" +echo "$completion_output" | grep -q "^flake1" +echo "$completion_output" | grepQuietInverse "^foo" + +# Test completion with upgrade command +completion_output=$(NIX_GET_COMPLETIONS=3 nix profile upgrade '' 2>&1) +echo "$completion_output" | grep -q "^normal$" +echo "$completion_output" | grep -q "^flake1" +echo "$completion_output" | grep -q "^foo" + # Test upgrading a package. 
printf NixOS > "$flake1Dir"/who printf 2.0 > "$flake1Dir"/version @@ -96,6 +120,7 @@ printf 1.0 > "$flake1Dir"/version # Test --all exclusivity. assertStderr nix --offline profile upgrade --all foo << EOF error: --all cannot be used with package names or regular expressions. + Try 'nix --help' for more information. EOF @@ -130,9 +155,8 @@ nix profile rollback [ -e "$TEST_HOME"/.nix-profile/bin/foo ] # shellcheck disable=SC2235 nix profile remove foo 2>&1 | grep 'removed 1 packages' -# shellcheck disable=SC2235 -(! [ -e "$TEST_HOME"/.nix-profile/bin/foo ]) -nix profile history | grep 'foo: 1.0 -> ∅' +[[ ! -e "$TEST_HOME"/.nix-profile/bin/foo ]] +nix profile history | grep 'foo: 1.0 removed' nix profile diff-closures | grep 'Version 3 -> 4' # Test installing a non-flake package. @@ -224,11 +248,11 @@ error: An existing package already provides the following file: The conflicting packages have a priority of 5. To prioritise the new package: - nix profile add path:${flake2Dir}#packages.${system}.default --priority 4 + nix profile add git+file://${flake2Dir}#packages.${system}.default --priority 4 To prioritise the existing package: - nix profile add path:${flake2Dir}#packages.${system}.default --priority 6 + nix profile add git+file://${flake2Dir}#packages.${system}.default --priority 6 EOF ) [[ $("$TEST_HOME"/.nix-profile/bin/hello) = "Hello World" ]] diff --git a/tests/functional/package.nix b/tests/functional/package.nix index b3b314a50d70..2c1146ec4e85 100644 --- a/tests/functional/package.nix +++ b/tests/functional/package.nix @@ -26,6 +26,9 @@ # For running the functional tests against a different pre-built Nix. test-daemon ? null, + + # Whether to run tests with lazy trees enabled. + lazyTrees ? 
false, }: let @@ -95,6 +98,8 @@ mkMesonDerivation ( mkdir $out ''; + _NIX_TEST_EXTRA_CONFIG = lib.optionalString lazyTrees "lazy-trees = true"; + meta = { platforms = lib.platforms.unix; }; diff --git a/tests/functional/path-info.sh b/tests/functional/path-info.sh index 712b5267878c..3d6041914ed6 100755 --- a/tests/functional/path-info.sh +++ b/tests/functional/path-info.sh @@ -13,7 +13,7 @@ barBase=$(basename "$bar") echo baz > "$TEST_ROOT"/baz baz=$(nix store add-file "$TEST_ROOT"/baz) bazBase=$(basename "$baz") -nix-store --delete "$baz" +nix-store --delete --ignore-liveness "$baz" diff --unified --color=always \ <(nix path-info --json --json-format 2 "$foo" "$bar" "$baz" | diff --git a/tests/functional/recursive.nix b/tests/functional/recursive.nix index be9e55da37ec..aa2aa26c5494 100644 --- a/tests/functional/recursive.nix +++ b/tests/functional/recursive.nix @@ -17,7 +17,7 @@ mkDerivation rec { buildCommand = '' mkdir $out - opts="--experimental-features nix-command ${ + opts="${ if (NIX_TESTS_CA_BY_DEFAULT == "1") then "--extra-experimental-features ca-derivations" else "" }" diff --git a/tests/functional/recursive.sh b/tests/functional/recursive.sh index 9115aa77583d..16c3fdab4df0 100755 --- a/tests/functional/recursive.sh +++ b/tests/functional/recursive.sh @@ -14,7 +14,7 @@ rm -f "$TEST_ROOT"/result unreachable=$(nix store add-path ./recursive.sh) export unreachable -NIX_BIN_DIR=$(dirname "$(type -p nix)") nix --extra-experimental-features 'nix-command recursive-nix' build -o "$TEST_ROOT"/result -L --impure --file ./recursive.nix +NIX_BIN_DIR=$(dirname "$(type -p nix)") nix --extra-experimental-features 'recursive-nix' build -o "$TEST_ROOT"/result -L --impure --file ./recursive.nix [[ $(cat "$TEST_ROOT"/result/inner1) =~ blaat ]] diff --git a/tests/functional/repl.sh b/tests/functional/repl.sh index 31f2fbdd88e7..7c28a7529009 100755 --- a/tests/functional/repl.sh +++ b/tests/functional/repl.sh @@ -220,7 +220,7 @@ EOF testReplResponse ' foo + baz ' "3" \ - 
./flake ./flake\#bar --experimental-features 'flakes' + ./flake ./flake\#bar testReplResponse $' :a { a = 1; b = 2; longerName = 3; "with spaces" = 4; } @@ -255,7 +255,7 @@ testReplResponseNoRegex $' # - Check that the result has changed mkfifo repl_fifo touch repl_output -nix repl ./flake --experimental-features 'flakes' < repl_fifo >> repl_output 2>&1 & +nix repl ./flake < repl_fifo >> repl_output 2>&1 & repl_pid=$! exec 3>repl_fifo # Open fifo for writing echo "changingThing" >&3 @@ -385,7 +385,7 @@ import $testDir/lang/parse-fail-eof-pos.nix badDiff=0 badExitCode=0 -nixVersion="$(nix eval --impure --raw --expr 'builtins.nixVersion' --extra-experimental-features nix-command)" +nixVersion="$(nix --version | sed 's/nix //')" # TODO: write a repl interacter for testing. Papering over the differences between readline / editline and between platforms is a pain. diff --git a/tests/functional/sentry.sh b/tests/functional/sentry.sh new file mode 100644 index 000000000000..0be7a3d305a4 --- /dev/null +++ b/tests/functional/sentry.sh @@ -0,0 +1,50 @@ +#!/usr/bin/env bash + +source common.sh + +# Enable sentry with a fake endpoint. +unset NIX_SENTRY_ENDPOINT +echo -n "file://$TEST_ROOT/sentry-endpoint" > "$test_nix_conf_dir/sentry-endpoint" + +ulimit -c 0 + +sentryDir="$TEST_HOME/.cache/nix/sentry" + +nix --version +if ! [[ -d $sentryDir ]]; then + skipTest "not built with sentry support" +fi + +waitForCrashDump() { + local i + for ((i = 0; i < 10; i++)); do + envelopes=("$sentryDir"/pending/*.dmp) + if [[ -e "${envelopes[0]}" ]]; then + return 0 + fi + sleep 0.1 + done + return 1 +} + +for type in segfault assert logic-error; do + if [[ $type = logic-error && $(uname) = Darwin ]]; then continue; fi + + rm -rf "$sentryDir" + + (! nix __crash "$type") + + if ! 
waitForCrashDump; then + fail "No crash dump found in $sentryDir after crash" + fi +done + +rm -rf "$sentryDir" + +if nix shell --file ./simple.nix --command bash -c 'kill -SEGV $$'; then + fail "Command did not segfault" +fi + +if waitForCrashDump; then + fail "Unexpected crash dump" +fi diff --git a/tests/functional/shell.sh b/tests/functional/shell.sh index 9769c90d1223..431cc2b598d9 100755 --- a/tests/functional/shell.sh +++ b/tests/functional/shell.sh @@ -65,6 +65,7 @@ path=$(nix eval --raw -f shell-hello.nix hello) # Note: we need the sandbox paths to ensure that the shell is # visible in the sandbox. +export NIX_SENTRY_ENDPOINT=file://$TEST_ROOT/sentry-endpoint # test whether Sentry is disabled in the chroot helper nix shell --sandbox-build-dir /build-tmp \ --sandbox-paths '/nix? /bin? /lib? /lib64? /usr?' \ --store "$TEST_ROOT/store0" -f shell-hello.nix hello -c hello | grep 'Hello World' diff --git a/tests/functional/simple.nix b/tests/functional/simple.nix index 2035ca294cce..bd8b234852d2 100644 --- a/tests/functional/simple.nix +++ b/tests/functional/simple.nix @@ -3,7 +3,23 @@ with import ./config.nix; mkDerivation { name = "simple"; builder = ./simple.builder.sh; + _builder = ./simple.builder.sh; PATH = ""; goodPath = path; - meta.position = "${__curPos.file}:${toString __curPos.line}"; + meta = { + position = "${__curPos.file}:${toString __curPos.line}"; + license = [ + # Since this file is from Nix, use Nix's license. + # Keep in sync with `lib.licenses.lgpl21` from Nixpkgs. 
+ { + deprecated = true; + free = true; + fullName = "GNU Lesser General Public License v2.1"; + redistributable = true; + shortName = "lgpl21"; + spdxId = "LGPL-2.1"; + url = "https://spdx.org/licenses/LGPL-2.1.html"; + } + ]; + }; } diff --git a/tests/functional/simple.sh b/tests/functional/simple.sh index f6507d74182a..366c75a3af0e 100755 --- a/tests/functional/simple.sh +++ b/tests/functional/simple.sh @@ -21,7 +21,7 @@ TODO_NixOS # Directed delete: $outPath is not reachable from a root, so it should # be deleteable. -nix-store --delete "$outPath" +nix-store --delete "$outPath" --ignore-liveness [[ ! -e $outPath/hello ]] outPath="$(NIX_REMOTE='local?store=/foo&real='"$TEST_ROOT"'/real-store' nix-instantiate --readonly-mode hash-check.nix)" diff --git a/tests/functional/store-info.sh b/tests/functional/store-info.sh index adaee5dfecfc..ee896929ae3d 100755 --- a/tests/functional/store-info.sh +++ b/tests/functional/store-info.sh @@ -65,7 +65,7 @@ check_human_readable "$STORE_INFO" check_human_readable "$LEGACY_STORE_INFO" if [[ -v NIX_DAEMON_PACKAGE ]] && isDaemonNewer "2.7.0pre20220126"; then - DAEMON_VERSION=$("$NIX_DAEMON_PACKAGE"/bin/nix daemon --version | cut -d' ' -f3) + DAEMON_VERSION=$("$NIX_DAEMON_PACKAGE"/bin/nix daemon --version | sed 's/.*) //') echo "$STORE_INFO" | grep "Version: $DAEMON_VERSION" [[ "$(echo "$STORE_INFO_JSON" | jq -r ".version")" == "$DAEMON_VERSION" ]] fi diff --git a/tests/functional/symlink-derivation.nix b/tests/functional/symlink-derivation.nix index e9a74cdcef27..6f7f5716a4b1 100644 --- a/tests/functional/symlink-derivation.nix +++ b/tests/functional/symlink-derivation.nix @@ -56,4 +56,12 @@ in touch $out ''; }; + + test_sandbox_paths_2 = mkDerivation { + name = "test-sandbox-paths-2"; + buildCommand = '' + mkdir $out + cat /foo/bar > $out/xyzzy + ''; + }; } diff --git a/tests/functional/tarball.sh b/tests/functional/tarball.sh index e7d9b96fda33..295ee521d768 100755 --- a/tests/functional/tarball.sh +++ 
b/tests/functional/tarball.sh @@ -38,6 +38,9 @@ test_tarball() { [[ $(nix eval --impure --expr "(fetchTree \"file://$tarball\").lastModified") = 1000000000 ]] + # fetchTree with a narHash is implicitly final, so it doesn't return attributes like lastModified. + [[ $(nix eval --impure --expr "(fetchTree { type = \"tarball\"; url = \"file://$tarball\"; narHash = \"$hash\"; }) ? lastModified") = false ]] + nix-instantiate --strict --eval -E "!((import (fetchTree { type = \"tarball\"; url = \"file://$tarball\"; narHash = \"$hash\"; })) ? submodules)" >&2 nix-instantiate --strict --eval -E "!((import (fetchTree { type = \"tarball\"; url = \"file://$tarball\"; narHash = \"$hash\"; })) ? submodules)" 2>&1 | grep 'true' @@ -116,3 +119,17 @@ path="$(nix flake prefetch --refresh --json "tarball+file://$TEST_ROOT/tar.tar" # Test that unpacking an empty file does not segfault (see https://github.com/NixOS/nix/issues/15116). touch "$TEST_ROOT/empty" expectStderr 1 nix store prefetch-file --unpack "file://$TEST_ROOT/empty" | grepQuiet "archive.*is empty" + +# Test that concurrent invocations of Nix will fetch the tarball only once. +rm -rf "$TEST_HOME/.cache" +store="$TEST_ROOT/prefetch-store" +nix-store --store "$store" --init # needed because concurrent creation of the store can give SQLite errors +_NIX_TEST_CONCURRENT_FETCHES=1 _NIX_FORCE_HTTP=1 nix flake prefetch --store "$store" -v "tarball+file://$TEST_ROOT/tar.tar" 2> "$TEST_ROOT/log1" & +pid1="$!" +_NIX_TEST_CONCURRENT_FETCHES=1 _NIX_FORCE_HTTP=1 nix flake prefetch --store "$store" -v "tarball+file://$TEST_ROOT/tar.tar" 2> "$TEST_ROOT/log2" & +pid2="$!" 
+wait "$pid1" +wait "$pid2" +[[ $(cat "$TEST_ROOT/log1" "$TEST_ROOT/log2" | grep -c "Download.*to") -eq 2 ]] +[[ $(cat "$TEST_ROOT/log1" "$TEST_ROOT/log2" | grep -c "downloading.*tar.tar") -eq 1 ]] +[[ $(cat "$TEST_ROOT/log1" "$TEST_ROOT/log2" | grep -c "waiting for another Nix process to finish fetching input") -eq 1 ]] diff --git a/tests/functional/wasm.sh b/tests/functional/wasm.sh new file mode 100644 index 000000000000..f0b64fd65fd4 --- /dev/null +++ b/tests/functional/wasm.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash + +source common.sh + +if [[ $(nix eval --extra-experimental-features wasm-builtin --expr 'builtins ? wasm') = false ]]; then + skipTest "builtins.wasm not available" +fi + +# Test running a WebAssembly module in text format (WAT). +[[ $(nix eval --json --impure \ + --extra-experimental-features wasm-builtin \ + --expr "builtins.wasm { wat = builtins.readFile ./fib.wat; function = \"fib\"; } 40") = 165580141 ]] + +# Test running a WebAssembly module in binary format (.wasm). 
+[[ $(nix eval --json --impure \ + --extra-experimental-features wasm-builtin \ + --expr "builtins.wasm { path = ./fib.wasm; function = \"fib\"; } 40") = 165580141 ]] diff --git a/tests/installer/default.nix b/tests/installer/default.nix index e4b891166adc..c0ce41233e65 100644 --- a/tests/installer/default.nix +++ b/tests/installer/default.nix @@ -232,7 +232,7 @@ let source /etc/bashrc || true nix-env --version - nix --extra-experimental-features nix-command store info + nix store info out=\$(nix-build --no-substitute -E 'derivation { name = "foo"; system = "x86_64-linux"; builder = "/bin/sh"; args = ["-c" "echo foobar > \$out"]; }') [[ \$(cat \$out) = foobar ]] diff --git a/tests/nixos/authorization.nix b/tests/nixos/authorization.nix index 6540e9fa3379..944e59259253 100644 --- a/tests/nixos/authorization.nix +++ b/tests/nixos/authorization.nix @@ -13,8 +13,6 @@ users.users.alice.isNormalUser = true; users.users.bob.isNormalUser = true; users.users.mallory.isNormalUser = true; - - nix.settings.experimental-features = "nix-command"; }; testScript = diff --git a/tests/nixos/cgroups/default.nix b/tests/nixos/cgroups/default.nix index a6b4bca8c76b..4161aba2ca2f 100644 --- a/tests/nixos/cgroups/default.nix +++ b/tests/nixos/cgroups/default.nix @@ -9,7 +9,7 @@ { virtualisation.additionalPaths = [ pkgs.stdenvNoCC ]; nix.extraOptions = '' - extra-experimental-features = nix-command auto-allocate-uids cgroups + extra-experimental-features = auto-allocate-uids cgroups extra-system-features = uid-range ''; nix.settings.use-cgroups = true; diff --git a/tests/nixos/chroot-store.nix b/tests/nixos/chroot-store.nix index 0a4fff99222b..ecac371e1521 100644 --- a/tests/nixos/chroot-store.nix +++ b/tests/nixos/chroot-store.nix @@ -25,7 +25,6 @@ in virtualisation.writableStore = true; virtualisation.additionalPaths = [ pkgA ]; environment.systemPackages = [ pkgB ]; - nix.extraOptions = "experimental-features = nix-command"; }; }; diff --git a/tests/nixos/containers/containers.nix 
b/tests/nixos/containers/containers.nix index b590dc8498f7..8d07c80b6a3c 100644 --- a/tests/nixos/containers/containers.nix +++ b/tests/nixos/containers/containers.nix @@ -23,7 +23,7 @@ virtualisation.memorySize = 4096; nix.settings.substituters = lib.mkForce [ ]; nix.extraOptions = '' - extra-experimental-features = nix-command auto-allocate-uids cgroups + extra-experimental-features = auto-allocate-uids cgroups extra-system-features = uid-range ''; nix.nixPath = [ "nixpkgs=${nixpkgs}" ]; diff --git a/tests/nixos/content-encoding.nix b/tests/nixos/content-encoding.nix index 5faca035f2f8..aa8d76fe7b87 100644 --- a/tests/nixos/content-encoding.nix +++ b/tests/nixos/content-encoding.nix @@ -131,6 +131,7 @@ in start_all() machine.wait_for_unit("nginx.service") + machine.wait_for_open_port(80) # Original test: zstd archive with gzip content-encoding # Make sure that the file is properly compressed as the test would be meaningless otherwise diff --git a/tests/nixos/fetch-git/test-cases/build-time-fetch-tree/default.nix b/tests/nixos/fetch-git/test-cases/build-time-fetch-tree/default.nix new file mode 100644 index 000000000000..a241c877d21e --- /dev/null +++ b/tests/nixos/fetch-git/test-cases/build-time-fetch-tree/default.nix @@ -0,0 +1,49 @@ +{ config, ... 
}: +{ + description = "build-time fetching"; + script = '' + import json + + # add a file to the repo + client.succeed(f""" + echo ${config.name # to make the git tree and store path unique + } > {repo.path}/test-case \ + && echo chiang-mai > {repo.path}/thailand \ + && {repo.git} add test-case thailand \ + && {repo.git} commit -m 'commit1' \ + && {repo.git} push origin main + """) + + # get the NAR hash + nar_hash = json.loads(client.succeed(f""" + nix flake prefetch --flake-registry "" git+{repo.remote} --json + """))['hash'] + + # construct the derivation + expr = f""" + derivation {{ + name = "source"; + builder = "builtin:fetch-tree"; + system = "builtin"; + __structuredAttrs = true; + input = {{ + type = "git"; + url = "{repo.remote}"; + ref = "main"; + }}; + outputHashMode = "recursive"; + outputHash = "{nar_hash}"; + }} + """ + + # do the build-time fetch + out_path = client.succeed(f""" + nix build --print-out-paths --store /run/store --flake-registry "" --extra-experimental-features build-time-fetch-tree --expr '{expr}' + """).strip() + + # check if the committed file is there + client.succeed(f""" + test -f /run/store/{out_path}/thailand + """) + ''; +} diff --git a/tests/nixos/fetch-git/test-cases/fetchTree-shallow/default.nix b/tests/nixos/fetch-git/test-cases/fetchTree-shallow/default.nix index f635df1f8793..a204caedd578 100644 --- a/tests/nixos/fetch-git/test-cases/fetchTree-shallow/default.nix +++ b/tests/nixos/fetch-git/test-cases/fetchTree-shallow/default.nix @@ -1,5 +1,5 @@ { - description = "fetchTree fetches git repos shallowly by default"; + description = "fetchTree fetches git repos shallowly if possible"; script = '' # purge nix git cache to make sure we start with a clean slate client.succeed("rm -rf ~/.cache/nix") @@ -28,6 +28,7 @@ type = "git"; url = "{repo.remote}"; rev = "{commit2_rev}"; + revCount = 1234; }} """ diff --git a/tests/nixos/fetch-git/testsupport/setup.nix b/tests/nixos/fetch-git/testsupport/setup.nix index 
1fbf5ff7448f..7e5423e9d6e1 100644 --- a/tests/nixos/fetch-git/testsupport/setup.nix +++ b/tests/nixos/fetch-git/testsupport/setup.nix @@ -81,10 +81,6 @@ in environment.variables = { _NIX_FORCE_HTTP = "1"; }; - nix.settings.experimental-features = [ - "nix-command" - "flakes" - ]; }; setupScript = ""; testScript = '' diff --git a/tests/nixos/fetchers-substitute.nix b/tests/nixos/fetchers-substitute.nix index 7abadd43af64..1ca4e4825155 100644 --- a/tests/nixos/fetchers-substitute.nix +++ b/tests/nixos/fetchers-substitute.nix @@ -167,28 +167,5 @@ content = importer.succeed(f"cat {result_path}/hello.txt").strip() assert content == "Hello from tarball!", f"Content mismatch: {content}" print("✓ fetchTarball content verified!") - - ########################################## - # Test 3: Verify fetchTree does NOT substitute (preserves metadata) - ########################################## - - print("Testing that fetchTree without __final does NOT use substitution...") - - # fetchTree with just narHash (not __final) should try to download, which will fail - # since the file doesn't exist on the importer - exit_code = importer.fail(f""" - nix-instantiate --eval --json --read-write-mode --expr ' - builtins.fetchTree {{ - type = "tarball"; - url = "file:///only-on-substituter.tar.gz"; - narHash = "{tarball_hash_sri}"; - }} - ' 2>&1 - """) - - # Should fail with "does not exist" since it tries to download instead of substituting - assert "does not exist" in exit_code or "Couldn't open file" in exit_code, f"Expected download failure, got: {exit_code}" - print("✓ fetchTree correctly does NOT substitute non-final inputs!") - print(" (This preserves metadata like lastModified from the actual fetch)") ''; } diff --git a/tests/nixos/fetchurl.nix b/tests/nixos/fetchurl.nix index e8663debbcd4..d75cc2017de2 100644 --- a/tests/nixos/fetchurl.nix +++ b/tests/nixos/fetchurl.nix @@ -64,8 +64,6 @@ in ]; virtualisation.writableStore = true; - - nix.settings.experimental-features = 
"nix-command"; }; }; diff --git a/tests/nixos/fsync.nix b/tests/nixos/fsync.nix index 2af9773336d8..4cdb17ada0c7 100644 --- a/tests/nixos/fsync.nix +++ b/tests/nixos/fsync.nix @@ -23,7 +23,6 @@ in { virtualisation.emptyDiskImages = [ 1024 ]; environment.systemPackages = [ pkg1 ]; - nix.settings.experimental-features = [ "nix-command" ]; nix.settings.fsync-store-paths = true; nix.settings.require-sigs = false; boot.supportedFilesystems = [ diff --git a/tests/nixos/functional/common.nix b/tests/nixos/functional/common.nix index 57f0bbc6a1c5..74ef7ea27623 100644 --- a/tests/nixos/functional/common.nix +++ b/tests/nixos/functional/common.nix @@ -15,6 +15,7 @@ ]; nix.settings.substituters = lib.mkForce [ ]; + systemd.services.nix-daemon.environment._NIX_IN_TEST = "1"; environment.systemPackages = let diff --git a/tests/nixos/git-submodules.nix b/tests/nixos/git-submodules.nix index c6f53ada2dc7..9105eb79bd7c 100644 --- a/tests/nixos/git-submodules.nix +++ b/tests/nixos/git-submodules.nix @@ -24,7 +24,6 @@ { programs.ssh.extraConfig = "ConnectTimeout 30"; environment.systemPackages = [ pkgs.git ]; - nix.extraOptions = "experimental-features = nix-command flakes"; }; }; diff --git a/tests/nixos/github-flakes.nix b/tests/nixos/github-flakes.nix index d14cd9d0c75d..3a72c669162e 100644 --- a/tests/nixos/github-flakes.nix +++ b/tests/nixos/github-flakes.nix @@ -17,7 +17,7 @@ let openssl req -newkey rsa:2048 -nodes -keyout $out/server.key \ -subj "/C=CN/ST=Denial/L=Springfield/O=Dis/CN=github.com" -out server.csr - openssl x509 -req -extfile <(printf "subjectAltName=DNS:api.github.com,DNS:github.com,DNS:channels.nixos.org") \ + openssl x509 -req -extfile <(printf "subjectAltName=DNS:api.github.com,DNS:github.com,DNS:channels.nixos.org,DNS:install.determinate.systems") \ -days 36500 -in server.csr -CA $out/ca.crt -CAkey ca.key -CAcreateserial -out $out/server.crt ''; @@ -107,13 +107,13 @@ in services.httpd.extraConfig = '' ErrorLog syslog:local6 ''; - 
services.httpd.virtualHosts."channels.nixos.org" = { + services.httpd.virtualHosts."install.determinate.systems" = { forceSSL = true; sslServerKey = "${cert}/server.key"; sslServerCert = "${cert}/server.crt"; servedDirs = [ { - urlPath = "/"; + urlPath = "/flake-registry/stable/"; dir = registry; } ]; @@ -163,9 +163,9 @@ in ]; virtualisation.memorySize = 4096; nix.settings.substituters = lib.mkForce [ ]; - nix.extraOptions = "experimental-features = nix-command flakes"; networking.hosts.${(builtins.head nodes.github.networking.interfaces.eth1.ipv4.addresses).address} = [ + "install.determinate.systems" "channels.nixos.org" "api.github.com" "github.com" @@ -204,14 +204,53 @@ in assert info["revision"] == "${nixpkgs.rev}", f"revision mismatch: {info['revision']} != ${nixpkgs.rev}" cat_log() + out = client.succeed("nix flake prefetch nixpkgs --json") + nar_hash = json.loads(out)['hash'] + + # Test build-time fetching of public flakes. + expr = f""" + derivation {{ + name = "source"; + builder = "builtin:fetch-tree"; + system = "builtin"; + __structuredAttrs = true; + input = {{ + type = "github"; + owner = "NixOS"; + repo = "nixpkgs"; + }}; + outputHashMode = "recursive"; + outputHash = "{nar_hash}"; + }} + """ + client.succeed(f"nix build --store /run/store --extra-experimental-features build-time-fetch-tree -L --expr '{expr}'") + # ... otherwise it should use the API - out = client.succeed("nix flake metadata private-flake --json --access-tokens github.com=ghp_000000000000000000000000000000000000 --tarball-ttl 0") + out = client.succeed("nix flake metadata private-flake --json --access-tokens github.com=ghp_000000000000000000000000000000000000 --tarball-ttl 0 --no-trust-tarballs-from-git-forges") print(out) info = json.loads(out) assert info["revision"] == "${private-flake-rev}", f"revision mismatch: {info['revision']} != ${private-flake-rev}" assert info["fingerprint"] cat_log() + # Test build-time fetching of private flakes. 
+ expr = f""" + derivation {{ + name = "source"; + builder = "builtin:fetch-tree"; + system = "builtin"; + __structuredAttrs = true; + input = {{ + type = "github"; + owner = "fancy-enterprise"; + repo = "private-flake"; + }}; + outputHashMode = "recursive"; + outputHash = "{info['locked']['narHash']}"; + }} + """ + client.succeed(f"nix build --store /run/store --extra-experimental-features build-time-fetch-tree --access-tokens github.com=ghp_000000000000000000000000000000000000 -L --expr '{expr}'") + # Fetching with the resolved URL should produce the same result. info2 = json.loads(client.succeed(f"nix flake metadata {info['url']} --json --access-tokens github.com=ghp_000000000000000000000000000000000000 --tarball-ttl 0")) print(info["fingerprint"], info2["fingerprint"]) @@ -225,6 +264,10 @@ in hash = client.succeed(f"nix eval --no-trust-tarballs-from-git-forges --raw --expr '(fetchTree {info['url']}).narHash'") assert hash == info['locked']['narHash'] + # Fetching with an incorrect NAR hash should fail. + out = client.fail(f"nix eval --no-trust-tarballs-from-git-forges --raw --expr '(fetchTree \"github:fancy-enterprise/private-flake/{info['revision']}?narHash=sha256-HsrRFZYg69qaVe/wDyWBYLeS6ca7ACEJg2Z%2BGpEFw4A%3D\").narHash' 2>&1") + assert "mismatch in field 'narHash'" in out, "NAR hash check did not fail with the expected error" + # Fetching without a narHash should succeed if trust-github is set and fail otherwise. 
client.succeed(f"nix eval --raw --expr 'builtins.fetchTree github:github:fancy-enterprise/private-flake/{info['revision']}'") out = client.fail(f"nix eval --no-trust-tarballs-from-git-forges --raw --expr 'builtins.fetchTree github:github:fancy-enterprise/private-flake/{info['revision']}' 2>&1") diff --git a/tests/nixos/nix-copy.nix b/tests/nixos/nix-copy.nix index 64de622de760..a7f0a6a326f4 100644 --- a/tests/nixos/nix-copy.nix +++ b/tests/nixos/nix-copy.nix @@ -39,7 +39,6 @@ in pkgD.drvPath ]; nix.settings.substituters = lib.mkForce [ ]; - nix.settings.experimental-features = [ "nix-command" ]; services.getty.autologinUser = "root"; programs.ssh.extraConfig = '' Host * diff --git a/tests/nixos/s3-binary-cache-store.nix b/tests/nixos/s3-binary-cache-store.nix index b2a86643d1e0..54ca03228597 100644 --- a/tests/nixos/s3-binary-cache-store.nix +++ b/tests/nixos/s3-binary-cache-store.nix @@ -39,7 +39,6 @@ in environment.systemPackages = [ pkgs.minio-client ]; nix.nixPath = [ "nixpkgs=${pkgs.path}" ]; nix.extraOptions = '' - experimental-features = nix-command substituters = ''; services.minio = { @@ -64,7 +63,6 @@ in virtualisation.writableStore = true; virtualisation.cores = 2; nix.extraOptions = '' - experimental-features = nix-command substituters = ''; networking.extraHosts = "192.168.1.2 vhost-test.minio.local minio.local"; @@ -955,6 +953,100 @@ in ) verify_packages_in_store(client, PKGS['A']) + @setup_s3( + populate_bucket=[PKGS['A']], + profiles={ + "valid": {"access_key": ACCESS_KEY, "secret_key": SECRET_KEY}, + "invalid": {"access_key": "INVALIDKEY", "secret_key": "INVALIDSECRET"}, + } + ) + def test_profile_credentials(bucket): + """Test that profile-based credentials work without environment variables""" + print("\n=== Testing Profile-Based Credentials ===") + + store_url = make_s3_url(bucket, profile="valid") + + # Verify store info works with profile credentials (no env vars) + client.succeed(f"HOME=/root nix store info --store '{store_url}' >&2") + + # 
Verify we can copy from the store using profile + verify_packages_in_store(client, PKGS['A'], should_exist=False) + client.succeed(f"HOME=/root nix copy --no-check-sigs --from '{store_url}' {PKGS['A']}") + verify_packages_in_store(client, PKGS['A']) + + # Clean up the package we just copied so we can test invalid profile + client.succeed(f"nix store delete --ignore-liveness {PKGS['A']}") + verify_packages_in_store(client, PKGS['A'], should_exist=False) + + # Verify invalid profile fails when trying to copy + invalid_url = make_s3_url(bucket, profile="invalid") + client.fail(f"HOME=/root nix copy --no-check-sigs --from '{invalid_url}' {PKGS['A']} 2>&1") + + @setup_s3( + populate_bucket=[PKGS['A']], + profiles={ + "wrong": {"access_key": "WRONGKEY", "secret_key": "WRONGSECRET"}, + } + ) + def test_env_vars_precedence(bucket): + """Test that environment variables take precedence over profile credentials""" + print("\n=== Testing Environment Variables Precedence ===") + + # Use profile with wrong credentials, but provide correct creds via env vars + store_url = make_s3_url(bucket, profile="wrong") + + # Ensure package is not in client store + verify_packages_in_store(client, PKGS['A'], should_exist=False) + + # This should succeed because env vars (correct) override profile (wrong) + output = client.succeed( + f"HOME=/root {ENV_WITH_CREDS} nix copy --no-check-sigs --debug --from '{store_url}' {PKGS['A']} 2>&1" + ) + + # Verify the credential chain shows Environment provider was added + if "Added AWS Environment Credential Provider" not in output: + print("Debug output:") + print(output) + raise Exception("Expected Environment provider to be added to chain") + + # Clean up the package so we can test again without env vars + client.succeed(f"nix store delete --ignore-liveness {PKGS['A']}") + verify_packages_in_store(client, PKGS['A'], should_exist=False) + + # Without env vars, same URL should fail (proving profile creds are actually wrong) + client.fail(f"HOME=/root nix 
copy --no-check-sigs --from '{store_url}' {PKGS['A']} 2>&1") + + @setup_s3( + populate_bucket=[PKGS['A']], + profiles={ + "testprofile": {"access_key": ACCESS_KEY, "secret_key": SECRET_KEY}, + } + ) + def test_credential_provider_chain(bucket): + """Test that debug logging shows which providers are added to the chain""" + print("\n=== Testing Credential Provider Chain Logging ===") + + store_url = make_s3_url(bucket, profile="testprofile") + + output = client.succeed( + f"HOME=/root nix store info --debug --store '{store_url}' 2>&1" + ) + + # For a named profile, we expect to see these providers in the chain + expected_providers = ["Environment", "Profile", "IMDS"] + for provider in expected_providers: + msg = f"Added AWS {provider} Credential Provider to chain for profile 'testprofile'" + if msg not in output: + print("Debug output:") + print(output) + raise Exception(f"Expected to find: {msg}") + + # SSO should be skipped (no SSO config for this profile) + if "Skipped AWS SSO Credential Provider for profile 'testprofile'" not in output: + print("Debug output:") + print(output) + raise Exception("Expected SSO provider to be skipped") + # ============================================================================ # Main Test Execution # ============================================================================ diff --git a/tests/nixos/sourcehut-flakes.nix b/tests/nixos/sourcehut-flakes.nix index 3f05130d6aab..5b40866d1fa8 100644 --- a/tests/nixos/sourcehut-flakes.nix +++ b/tests/nixos/sourcehut-flakes.nix @@ -119,7 +119,6 @@ in virtualisation.memorySize = 4096; nix.settings.substituters = lib.mkForce [ ]; nix.extraOptions = '' - experimental-features = nix-command flakes flake-registry = https://git.sr.ht/~NixOS/flake-registry/blob/master/flake-registry.json ''; environment.systemPackages = [ pkgs.jq ]; diff --git a/tests/nixos/tarball-flakes.nix b/tests/nixos/tarball-flakes.nix index 26c20cb1aef4..ab9b200db269 100644 --- a/tests/nixos/tarball-flakes.nix +++ 
b/tests/nixos/tarball-flakes.nix @@ -61,7 +61,6 @@ in ]; virtualisation.memorySize = 4096; nix.settings.substituters = lib.mkForce [ ]; - nix.extraOptions = "experimental-features = nix-command flakes"; }; }; @@ -99,7 +98,6 @@ in # Check that fetching fails if we provide incorrect attributes. machine.fail("nix flake metadata --json http://localhost/tags/latest.tar.gz?rev=493300eb13ae6fb387fbd47bf54a85915acc31c0") - machine.fail("nix flake metadata --json http://localhost/tags/latest.tar.gz?revCount=789") machine.fail("nix flake metadata --json http://localhost/tags/latest.tar.gz?narHash=sha256-tbudgBSg+bHWHiHnlteNzN8TUvI80ygS9IULh4rklEw=") ''; diff --git a/tests/repl-completion.nix b/tests/repl-completion.nix index 9ea45a7b1b03..29a48738f8b3 100644 --- a/tests/repl-completion.nix +++ b/tests/repl-completion.nix @@ -15,7 +15,7 @@ runCommand "repl-completion" ]; expectScript = '' # Regression https://github.com/NixOS/nix/pull/10778 - spawn nix repl --offline --extra-experimental-features nix-command + spawn nix repl --offline expect "nix-repl>" send "foo = import ./does-not-exist.nix\n" expect "nix-repl>"