diff --git a/.gitignore b/.gitignore index 337a7c154..9c4691240 100644 --- a/.gitignore +++ b/.gitignore @@ -14,7 +14,7 @@ /tests/functional/lang/*.err /tests/functional/lang/*.ast -outputs/ +/outputs *~ diff --git a/.mergify.yml b/.mergify.yml index 5b03feca0..021157eb9 100644 --- a/.mergify.yml +++ b/.mergify.yml @@ -106,3 +106,14 @@ pull_request_rules: labels: - automatic backport - merge-queue + + - name: backport patches to 2.26 + conditions: + - label=backport 2.26-maintenance + actions: + backport: + branches: + - "2.26-maintenance" + labels: + - automatic backport + - merge-queue diff --git a/.version b/.version index 3953e8ad5..f0465234b 100644 --- a/.version +++ b/.version @@ -1 +1 @@ -2.26.3 +2.27.1 diff --git a/doc/manual/redirects.js b/doc/manual/redirects.js index 3a86ae407..3648ad898 100644 --- a/doc/manual/redirects.js +++ b/doc/manual/redirects.js @@ -342,6 +342,9 @@ const redirects = { "scoping-rules": "scoping.html", "string-literal": "string-literals.html", }, + "language/derivations.md": { + "builder-execution": "store/building.md#builder-execution", + }, "installation/installing-binary.html": { "linux": "uninstall.html#linux", "macos": "uninstall.html#macos", @@ -368,6 +371,7 @@ const redirects = { "glossary.html": { "gloss-local-store": "store/types/local-store.html", "gloss-chroot-store": "store/types/local-store.html", + "gloss-content-addressed-derivation": "#gloss-content-addressing-derivation", }, }; diff --git a/doc/manual/source/SUMMARY.md.in b/doc/manual/source/SUMMARY.md.in index fa14da872..8d6ad9f93 100644 --- a/doc/manual/source/SUMMARY.md.in +++ b/doc/manual/source/SUMMARY.md.in @@ -17,6 +17,11 @@ - [Store Object](store/store-object.md) - [Content-Addressing Store Objects](store/store-object/content-address.md) - [Store Path](store/store-path.md) + - [Store Derivation and Deriving Path](store/derivation/index.md) + - [Derivation Outputs and Types of Derivations](store/derivation/outputs/index.md) + - [Content-addressing 
derivation outputs](store/derivation/outputs/content-address.md) + - [Input-addressing derivation outputs](store/derivation/outputs/input-address.md) + - [Building](store/building.md) - [Store Types](store/types/index.md) {{#include ./store/types/SUMMARY.md}} - [Nix Language](language/index.md) @@ -126,4 +131,5 @@ - [Release 1.0.0 (2025-??-??)](release-notes-determinate/rl-1.0.0.md) - [Nix Release Notes](release-notes/index.md) {{#include ./SUMMARY-rl-next.md}} + - [Release 2.27 (2025-03-03)](release-notes/rl-2.27.md) - [Release 2.26 (2025-01-22)](release-notes/rl-2.26.md) diff --git a/doc/manual/source/advanced-topics/distributed-builds.md b/doc/manual/source/advanced-topics/distributed-builds.md index 66e371888..464b87d6e 100644 --- a/doc/manual/source/advanced-topics/distributed-builds.md +++ b/doc/manual/source/advanced-topics/distributed-builds.md @@ -20,7 +20,7 @@ For a local machine to forward a build to a remote machine, the remote machine m ## Testing -To test connecting to a remote Nix instance (in this case `mac`), run: +To test connecting to a remote [Nix instance] (in this case `mac`), run: ```console nix store info --store ssh://username@mac @@ -106,3 +106,5 @@ file included in `builders` via the syntax `@/path/to/file`. For example, causes the list of machines in `/etc/nix/machines` to be included. (This is the default.) + +[Nix instance]: @docroot@/glossary.md#gloss-nix-instance \ No newline at end of file diff --git a/doc/manual/source/architecture/architecture.md b/doc/manual/source/architecture/architecture.md index 867a9c992..cbc469355 100644 --- a/doc/manual/source/architecture/architecture.md +++ b/doc/manual/source/architecture/architecture.md @@ -69,7 +69,7 @@ It can also execute build plans to produce new data, which are made available to A build plan itself is a series of *build tasks*, together with their build inputs. > **Important** -> A build task in Nix is called [derivation](@docroot@/glossary.md#gloss-derivation). 
+> A build task in Nix is called [store derivation](@docroot@/glossary.md#gloss-store-derivation). Each build task has a special build input executed as *build instructions* in order to perform the build. The result of a build task can be input to another build task. diff --git a/doc/manual/source/command-ref/nix-env/install.md b/doc/manual/source/command-ref/nix-env/install.md index b6a71e8bd..26a32aa6b 100644 --- a/doc/manual/source/command-ref/nix-env/install.md +++ b/doc/manual/source/command-ref/nix-env/install.md @@ -22,11 +22,11 @@ It is based on the current generation of the active [profile](@docroot@/command- The arguments *args* map to store paths in a number of possible ways: -- By default, *args* is a set of [derivation] names denoting derivations in the default Nix expression. +- By default, *args* is a set of names denoting derivations in the default Nix expression. These are [realised], and the resulting output paths are installed. Currently installed derivations with a name equal to the name of a derivation being added are removed unless the option `--preserve-installed` is specified. - [derivation]: @docroot@/glossary.md#gloss-derivation + [derivation expression]: @docroot@/glossary.md#gloss-derivation-expression [realised]: @docroot@/glossary.md#gloss-realise If there are multiple derivations matching a name in *args* that @@ -65,11 +65,11 @@ The arguments *args* map to store paths in a number of possible ways: This can be used to override the priority of the derivations being installed. This is useful if *args* are [store paths], which don't have any priority information. -- If *args* are [store derivations](@docroot@/glossary.md#gloss-store-derivation), then these are [realised], and the resulting output paths are installed. +- If *args* are [store paths] that point to [store derivations][store derivation], then those store derivations are [realised], and the resulting output paths are installed. 
-
-- If *args* are [store paths] that are not store derivations, then these are [realised] and installed. +- If *args* are [store paths] that do not point to store derivations, then these are [realised] and installed. -- By default all [outputs](@docroot@/language/derivations.md#attr-outputs) are installed for each [derivation]. +- By default all [outputs](@docroot@/language/derivations.md#attr-outputs) are installed for each [store derivation]. This can be overridden by adding a `meta.outputsToInstall` attribute on the derivation listing a subset of the output names. Example: @@ -121,6 +121,8 @@ The arguments *args* map to store paths in a number of possible ways: manifest.nix ``` +[store derivation]: @docroot@/glossary.md#gloss-store-derivation + # Options - `--prebuilt-only` / `-b` diff --git a/doc/manual/source/command-ref/nix-env/query.md b/doc/manual/source/command-ref/nix-env/query.md index c67794ed5..bde9b3820 100644 --- a/doc/manual/source/command-ref/nix-env/query.md +++ b/doc/manual/source/command-ref/nix-env/query.md @@ -125,7 +125,10 @@ derivation is shown unless `--no-name` is specified. - `--drv-path` - Print the path of the [store derivation](@docroot@/glossary.md#gloss-store-derivation). + Print the [store path] to the [store derivation]. + + [store path]: @docroot@/glossary.md#gloss-store-path + [store derivation]: @docroot@/glossary.md#gloss-store-derivation - `--out-path` diff --git a/doc/manual/source/command-ref/nix-hash.md b/doc/manual/source/command-ref/nix-hash.md index f249c2b84..0860f312d 100644 --- a/doc/manual/source/command-ref/nix-hash.md +++ b/doc/manual/source/command-ref/nix-hash.md @@ -67,7 +67,7 @@ md5sum`. - `--type` *hashAlgo* Use the specified cryptographic hash algorithm, which can be one of - `md5`, `sha1`, `sha256`, and `sha512`. + `blake3`, `md5`, `sha1`, `sha256`, and `sha512`. 
- `--to-base16` diff --git a/doc/manual/source/command-ref/nix-instantiate.md b/doc/manual/source/command-ref/nix-instantiate.md index 487ef8f10..38454515d 100644 --- a/doc/manual/source/command-ref/nix-instantiate.md +++ b/doc/manual/source/command-ref/nix-instantiate.md @@ -42,8 +42,8 @@ standard input. - `--eval` Just parse and evaluate the input files, and print the resulting - values on standard output. No instantiation of store derivations - takes place. + values on standard output. + Store derivations are not serialized and written to the store, but instead just hashed and discarded. > **Warning** > diff --git a/doc/manual/source/command-ref/nix-prefetch-url.md b/doc/manual/source/command-ref/nix-prefetch-url.md index ffab94b8a..19322ec8e 100644 --- a/doc/manual/source/command-ref/nix-prefetch-url.md +++ b/doc/manual/source/command-ref/nix-prefetch-url.md @@ -42,7 +42,7 @@ the path of the downloaded file in the Nix store is also printed. - `--type` *hashAlgo* Use the specified cryptographic hash algorithm, - which can be one of `md5`, `sha1`, `sha256`, and `sha512`. + which can be one of `blake3`, `md5`, `sha1`, `sha256`, and `sha512`. The default is `sha256`. - `--print-path` diff --git a/doc/manual/source/command-ref/nix-store/realise.md b/doc/manual/source/command-ref/nix-store/realise.md index a899758df..240685ce5 100644 --- a/doc/manual/source/command-ref/nix-store/realise.md +++ b/doc/manual/source/command-ref/nix-store/realise.md @@ -15,7 +15,7 @@ Each of *paths* is processed as follows: 1. If it is not [valid], substitute the store derivation file itself. 2. Realise its [output paths]: - Try to fetch from [substituters] the [store objects] associated with the output paths in the store derivation's [closure]. - - With [content-addressed derivations] (experimental): + - With [content-addressing derivations] (experimental): Determine the output paths to realise by querying content-addressed realisation entries in the [Nix database]. 
- For any store paths that cannot be substituted, produce the required store objects: 1. Realise all outputs of the derivation's dependencies @@ -32,7 +32,7 @@ If no substitutes are available and no store derivation is given, realisation fa [store objects]: @docroot@/store/store-object.md [closure]: @docroot@/glossary.md#gloss-closure [substituters]: @docroot@/command-ref/conf-file.md#conf-substituters -[content-addressed derivations]: @docroot@/development/experimental-features.md#xp-feature-ca-derivations +[content-addressing derivations]: @docroot@/development/experimental-features.md#xp-feature-ca-derivations [Nix database]: @docroot@/glossary.md#gloss-nix-database The resulting paths are printed on standard output. diff --git a/doc/manual/source/development/building.md b/doc/manual/source/development/building.md index 6eea1970e..49cc9d568 100644 --- a/doc/manual/source/development/building.md +++ b/doc/manual/source/development/building.md @@ -11,7 +11,7 @@ This shell also adds `./outputs/bin/nix` to your `$PATH` so you can run `nix` im To get a shell with one of the other [supported compilation environments](#compilation-environments): ```console -$ nix develop .#native-clangStdenvPackages +$ nix develop .#native-clangStdenv ``` > **Note** @@ -93,11 +93,13 @@ It is useful to perform multiple cross and native builds on the same source tree for example to ensure that better support for one platform doesn't break the build for another. Meson thankfully makes this very easy by confining all build products to the build directory --- one simple shares the source directory between multiple build directories, each of which contains the build for Nix to a different platform. -Nixpkgs's `configurePhase` always chooses `build` in the current directory as the name and location of the build. -This makes having multiple build directories slightly more inconvenient. -The good news is that Meson/Ninja seem to cope well with relocating the build directory after it is created. 
+Here's how to do that: -Here's how to do that +1. Instruct Nixpkgs's infra where we want Meson to put its build directory + + ```bash + mesonBuildDir=build-my-variant-name + ``` 1. Configure as usual @@ -105,24 +107,12 @@ Here's how to do that configurePhase ``` -2. Rename the build directory - - ```bash - cd .. # since `configurePhase` cd'd inside - mv build build-linux # or whatever name we want - cd build-linux - ``` - 3. Build as usual ```bash buildPhase ``` -> **N.B.** -> [`nixpkgs#335818`](https://github.com/NixOS/nixpkgs/issues/335818) tracks giving `mesonConfigurePhase` proper support for custom build directories. -> When it is fixed, we can simplify these instructions and then remove this notice. - ## System type Nix uses a string with the following format to identify the *system type* or *platform* it runs on: @@ -179,7 +169,8 @@ See [supported compilation environments](#compilation-environments) and instruct To use the LSP with your editor, you will want a `compile_commands.json` file telling `clangd` how we are compiling the code. Meson's configure always produces this inside the build directory. -Configure your editor to use the `clangd` from the `.#native-clangStdenvPackages` shell. You can do that either by running it inside the development shell, or by using [nix-direnv](https://github.com/nix-community/nix-direnv) and [the appropriate editor plugin](https://github.com/direnv/direnv/wiki#editor-integration). +Configure your editor to use the `clangd` from the `.#native-clangStdenv` shell. +You can do that either by running it inside the development shell, or by using [nix-direnv](https://github.com/nix-community/nix-direnv) and [the appropriate editor plugin](https://github.com/direnv/direnv/wiki#editor-integration). 
> **Note** > @@ -195,6 +186,8 @@ You may run the formatters as a one-off using: ./maintainers/format.sh ``` +### Pre-commit hooks + If you'd like to run the formatters before every commit, install the hooks: ``` @@ -209,3 +202,30 @@ If it fails, run `git add --patch` to approve the suggestions _and commit again_ To refresh pre-commit hook's config file, do the following: 1. Exit the development shell and start it again by running `nix develop`. 2. If you also use the pre-commit hook, also run `pre-commit-hooks-install` again. + +### VSCode + +Insert the following json into your `.vscode/settings.json` file to configure `nixfmt`. +This will be picked up by the _Format Document_ command, `"editor.formatOnSave"`, etc. + +```json +{ + "nix.formatterPath": "nixfmt", + "nix.serverSettings": { + "nixd": { + "formatting": { + "command": [ + "nixfmt" + ], + }, + }, + "nil": { + "formatting": { + "command": [ + "nixfmt" + ], + }, + }, + }, +} +``` diff --git a/doc/manual/source/development/debugging.md b/doc/manual/source/development/debugging.md index ce623110b..98456841a 100644 --- a/doc/manual/source/development/debugging.md +++ b/doc/manual/source/development/debugging.md @@ -2,6 +2,8 @@ This section shows how to build and debug Nix with debug symbols enabled. +Additionally, see [Testing Nix](./testing.md) for further instructions on how to debug Nix in the context of a unit test or functional test. + ## Building Nix with Debug Symbols In the development shell, set the `mesonBuildType` environment variable to `debug` before configuring the build: @@ -13,6 +15,15 @@ In the development shell, set the `mesonBuildType` environment variable to `debu Then, proceed to build Nix as described in [Building Nix](./building.md). This will build Nix with debug symbols, which are essential for effective debugging. 
+It is also possible to build without debugging for faster build: + +```console +[nix-shell]$ NIX_HARDENING_ENABLE=$(printLines $NIX_HARDENING_ENABLE | grep -v fortify) +[nix-shell]$ export mesonBuildType=debug +``` + +(The first line is needed because `fortify` hardening requires at least some optimization.) + ## Debugging the Nix Binary Obtain your preferred debugger within the development shell: diff --git a/doc/manual/source/development/testing.md b/doc/manual/source/development/testing.md index d582ce4b4..d0c3a1c78 100644 --- a/doc/manual/source/development/testing.md +++ b/doc/manual/source/development/testing.md @@ -87,7 +87,11 @@ A environment variables that Google Test accepts are also worth knowing: This is used to avoid logging passing tests. -Putting the two together, one might run +3. [`GTEST_BREAK_ON_FAILURE`](https://google.github.io/googletest/advanced.html#turning-assertion-failures-into-break-points) + + This is used to create a debugger breakpoint when an assertion failure occurs. + +Putting the first two together, one might run ```bash GTEST_BRIEF=1 GTEST_FILTER='ErrorTraceTest.*' meson test nix-expr-tests -v @@ -95,6 +99,22 @@ GTEST_BRIEF=1 GTEST_FILTER='ErrorTraceTest.*' meson test nix-expr-tests -v for short but comprensive output. +### Debugging tests + +For debugging, it is useful to combine the third option above with Meson's [`--gdb`](https://mesonbuild.com/Unit-tests.html#other-test-options) flag: + +```bash +GTEST_BRIEF=1 GTEST_FILTER='Group.my-failing-test' meson test nix-expr-tests --gdb +``` + +This will: + +1. Run the unit test with GDB + +2. Run just `Group.my-failing-test` + +3. Stop the program when the test fails, allowing the user to then issue arbitrary commands to GDB. + ### Characterisation testing { #characaterisation-testing-unit } See [functional characterisation testing](#characterisation-testing-functional) for a broader discussion of characterisation testing. 
@@ -144,7 +164,7 @@ $ checkPhase Sometimes it is useful to group related tests so they can be easily run together without running the entire test suite. Each test group is in a subdirectory of `tests`. -For example, `tests/functional/ca/meson.build` defines a `ca` test group for content-addressed derivation outputs. +For example, `tests/functional/ca/meson.build` defines a `ca` test group for content-addressing derivation outputs. That test group can be run like this: @@ -213,10 +233,10 @@ edit it like so: bar ``` -Then, running the test with `./mk/debug-test.sh` will drop you into GDB once the script reaches that point: +Then, running the test with [`--interactive`](https://mesonbuild.com/Unit-tests.html#other-test-options) will prevent Meson from hijacking the terminal so you can drop into GDB once the script reaches that point: ```shell-session -$ ./mk/debug-test.sh tests/functional/${testName}.sh +$ meson test ${testName} --interactive ... + gdb blash blub GNU gdb (GDB) 12.1 diff --git a/doc/manual/source/glossary.md b/doc/manual/source/glossary.md index fa357ece3..6a7501200 100644 --- a/doc/manual/source/glossary.md +++ b/doc/manual/source/glossary.md @@ -1,5 +1,13 @@ # Glossary +- [build system]{#gloss-build-system} + + Generic term for software that facilitates the building of software by automating the invocation of compilers, linkers, and other tools. + + Nix can be used as a generic build system. + It has no knowledge of any particular programming language or toolchain. + These details are specified in [derivation expressions](#gloss-derivation-expression). 
+ - [content address]{#gloss-content-address} A @@ -13,37 +21,45 @@ - [Content-Addressing File System Objects](@docroot@/store/file-system-object/content-address.md) - [Content-Addressing Store Objects](@docroot@/store/store-object/content-address.md) - - [content-addressed derivation](#gloss-content-addressed-derivation) + - [content-addressing derivation](#gloss-content-addressing-derivation) Software Heritage's writing on [*Intrinsic and Extrinsic identifiers*](https://www.softwareheritage.org/2020/07/09/intrinsic-vs-extrinsic-identifiers) is also a good introduction to the value of content-addressing over other referencing schemes. Besides content addressing, the Nix store also uses [input addressing](#gloss-input-addressed-store-object). -- [derivation]{#gloss-derivation} +- [content-addressed storage]{#gloss-content-addressed-store} - A description of a build task. The result of a derivation is a - store object. Derivations declared in Nix expressions are specified - using the [`derivation` primitive](./language/derivations.md). These are - translated into low-level *store derivations* (implicitly by - `nix-build`, or explicitly by `nix-instantiate`). - - [derivation]: #gloss-derivation + The industry term for storage and retrieval systems using [content addressing](#gloss-content-address). A Nix store also has [input addressing](#gloss-input-addressed-store-object), and metadata. - [store derivation]{#gloss-store-derivation} - A [derivation] represented as a `.drv` file in the [store]. - It has a [store path], like any [store object]. - It is the [instantiated][instantiate] form of a derivation. - - Example: `/nix/store/g946hcz4c8mdvq2g8vxx42z51qb71rvp-git-2.38.1.drv` - - See [`nix derivation show`](./command-ref/new-cli/nix3-derivation-show.md) (experimental) for displaying the contents of store derivations. + A single build task. + See [Store Derivation](@docroot@/store/derivation/index.md#store-derivation) for details. 
[store derivation]: #gloss-store-derivation +- [derivation path]{#gloss-derivation-path} + + A [store path] which uniquely identifies a [store derivation]. + + See [Referencing Store Derivations](@docroot@/store/derivation/index.md#derivation-path) for details. + + Not to be confused with [deriving path]. + + [derivation path]: #gloss-derivation-path + +- [derivation expression]{#gloss-derivation-expression} + + A description of a [store derivation] in the Nix language. + The output(s) of a derivation are store objects. + Derivations are typically specified in Nix expressions using the [`derivation` primitive](./language/derivations.md). + These are translated into store layer *derivations* (implicitly by `nix-env` and `nix-build`, or explicitly by `nix-instantiate`). + + [derivation expression]: #gloss-derivation-expression + - [instantiate]{#gloss-instantiate}, instantiation - Save an evaluated [derivation] as a [store derivation] in the Nix [store]. + Translate a [derivation expression] into a [store derivation]. See [`nix-instantiate`](./command-ref/nix-instantiate.md), which produces a store derivation from a Nix expression that evaluates to a derivation. 
@@ -55,7 +71,7 @@ This can be achieved by: - Fetching a pre-built [store object] from a [substituter] - - Running the [`builder`](@docroot@/language/derivations.md#attr-builder) executable as specified in the corresponding [derivation] + - Running the [`builder`](@docroot@/language/derivations.md#attr-builder) executable as specified in the corresponding [store derivation] - Delegating to a [remote machine](@docroot@/command-ref/conf-file.md#conf-builders) and retrieving the outputs @@ -65,7 +81,7 @@ [realise]: #gloss-realise -- [content-addressed derivation]{#gloss-content-addressed-derivation} +- [content-addressing derivation]{#gloss-content-addressing-derivation} A derivation which has the [`__contentAddressed`](./language/advanced-attributes.md#adv-attr-__contentAddressed) @@ -73,7 +89,7 @@ - [fixed-output derivation]{#gloss-fixed-output-derivation} (FOD) - A [derivation] where a cryptographic hash of the [output] is determined in advance using the [`outputHash`](./language/advanced-attributes.md#adv-attr-outputHash) attribute, and where the [`builder`](@docroot@/language/derivations.md#attr-builder) executable has access to the network. + A [store derivation] where a cryptographic hash of the [output] is determined in advance using the [`outputHash`](./language/advanced-attributes.md#adv-attr-outputHash) attribute, and where the [`builder`](@docroot@/language/derivations.md#attr-builder) executable has access to the network. - [store]{#gloss-store} @@ -84,6 +100,12 @@ [store]: #gloss-store +- [Nix instance]{#gloss-nix-instance} + + 1. An installation of Nix, which includes the presence of a [store], and the Nix package manager which operates on that store. + A local Nix installation and a [remote builder](@docroot@/advanced-topics/distributed-builds.md) are two examples of Nix instances. + 2. A running Nix process, such as the `nix` command. 
+ - [binary cache]{#gloss-binary-cache} A *binary cache* is a Nix store which uses a different format: its @@ -130,7 +152,7 @@ - [input-addressed store object]{#gloss-input-addressed-store-object} A store object produced by building a - non-[content-addressed](#gloss-content-addressed-derivation), + non-[content-addressed](#gloss-content-addressing-derivation), non-[fixed-output](#gloss-fixed-output-derivation) derivation. @@ -138,7 +160,7 @@ A [store object] which is [content-addressed](#gloss-content-address), i.e. whose [store path] is determined by its contents. - This includes derivations, the outputs of [content-addressed derivations](#gloss-content-addressed-derivation), and the outputs of [fixed-output derivations](#gloss-fixed-output-derivation). + This includes derivations, the outputs of [content-addressing derivations](#gloss-content-addressing-derivation), and the outputs of [fixed-output derivations](#gloss-fixed-output-derivation). See [Content-Addressing Store Objects](@docroot@/store/store-object/content-address.md) for details. @@ -188,7 +210,7 @@ > > The contents of a `.nix` file form a Nix expression. - Nix expressions specify [derivations][derivation], which are [instantiated][instantiate] into the Nix store as [store derivations][store derivation]. + Nix expressions specify [derivation expressions][derivation expression], which are [instantiated][instantiate] into the Nix store as [store derivations][store derivation]. These derivations can then be [realised][realise] to produce [outputs][output]. > **Example** @@ -216,7 +238,7 @@ directly or indirectly “reachable” from that store path; that is, it’s the closure of the path under the *references* relation. For a package, the closure of its derivation is equivalent to the - build-time dependencies, while the closure of its output path is + build-time dependencies, while the closure of its [output path] is equivalent to its runtime dependencies. 
For correct deployment it is necessary to deploy whole closures, since otherwise at runtime files could be missing. The command `nix-store --query --requisites ` prints out @@ -230,14 +252,14 @@ - [output]{#gloss-output} - A [store object] produced by a [derivation]. + A [store object] produced by a [store derivation]. See [the `outputs` argument to the `derivation` function](@docroot@/language/derivations.md#attr-outputs) for details. [output]: #gloss-output - [output path]{#gloss-output-path} - The [store path] to the [output] of a [derivation]. + The [store path] to the [output] of a [store derivation]. [output path]: #gloss-output-path @@ -246,14 +268,11 @@ - [deriving path]{#gloss-deriving-path} - Deriving paths are a way to refer to [store objects][store object] that ar not yet [realised][realise]. - This is necessary because, in general and particularly for [content-addressed derivations][content-addressed derivation], the [output path] of an [output] is not known in advance. - There are two forms: + Deriving paths are a way to refer to [store objects][store object] that might not yet be [realised][realise]. - - *constant*: just a [store path] - It can be made [valid][validity] by copying it into the store: from the evaluator, command line interface or another store. + See [Deriving Path](./store/derivation/index.md#deriving-path) for details. - - *output*: a pair of a [store path] to a [derivation] and an [output] name. + Not to be confused with [derivation path]. - [deriver]{#gloss-deriver} diff --git a/doc/manual/source/language/advanced-attributes.md b/doc/manual/source/language/advanced-attributes.md index 51b83fc8a..0722386c4 100644 --- a/doc/manual/source/language/advanced-attributes.md +++ b/doc/manual/source/language/advanced-attributes.md @@ -99,8 +99,8 @@ Derivations can declare some infrequently used optional attributes. to make it use the proxy server configuration specified by the user in the environment variables `http_proxy` and friends. 
- This attribute is only allowed in *fixed-output derivations* (see - below), where impurities such as these are okay since (the hash + This attribute is only allowed in [fixed-output derivations][fixed-output derivation], + where impurities such as these are okay since (the hash of) the output is known in advance. It is ignored for all other derivations. @@ -119,135 +119,6 @@ Derivations can declare some infrequently used optional attributes. [`impure-env`](@docroot@/command-ref/conf-file.md#conf-impure-env) configuration setting. - - [`outputHash`]{#adv-attr-outputHash}; [`outputHashAlgo`]{#adv-attr-outputHashAlgo}; [`outputHashMode`]{#adv-attr-outputHashMode}\ - These attributes declare that the derivation is a so-called *fixed-output derivation* (FOD), which means that a cryptographic hash of the output is already known in advance. - - As opposed to regular derivations, the [`builder`] executable of a fixed-output derivation has access to the network. - Nix computes a cryptographic hash of its output and compares that to the hash declared with these attributes. - If there is a mismatch, the derivation fails. - - The rationale for fixed-output derivations is derivations such as - those produced by the `fetchurl` function. This function downloads a - file from a given URL. To ensure that the downloaded file has not - been modified, the caller must also specify a cryptographic hash of - the file. For example, - - ```nix - fetchurl { - url = "http://ftp.gnu.org/pub/gnu/hello/hello-2.1.1.tar.gz"; - sha256 = "1md7jsfd8pa45z73bz1kszpp01yw6x5ljkjk2hx7wl800any6465"; - } - ``` - - It sometimes happens that the URL of the file changes, e.g., because - servers are reorganised or no longer available. 
We then must update - the call to `fetchurl`, e.g., - - ```nix - fetchurl { - url = "ftp://ftp.nluug.nl/pub/gnu/hello/hello-2.1.1.tar.gz"; - sha256 = "1md7jsfd8pa45z73bz1kszpp01yw6x5ljkjk2hx7wl800any6465"; - } - ``` - - If a `fetchurl` derivation was treated like a normal derivation, the - output paths of the derivation and *all derivations depending on it* - would change. For instance, if we were to change the URL of the - Glibc source distribution in Nixpkgs (a package on which almost all - other packages depend) massive rebuilds would be needed. This is - unfortunate for a change which we know cannot have a real effect as - it propagates upwards through the dependency graph. - - For fixed-output derivations, on the other hand, the name of the - output path only depends on the `outputHash*` and `name` attributes, - while all other attributes are ignored for the purpose of computing - the output path. (The `name` attribute is included because it is - part of the path.) - - As an example, here is the (simplified) Nix expression for - `fetchurl`: - - ```nix - { stdenv, curl }: # The curl program is used for downloading. - - { url, sha256 }: - - stdenv.mkDerivation { - name = baseNameOf (toString url); - builder = ./builder.sh; - buildInputs = [ curl ]; - - # This is a fixed-output derivation; the output must be a regular - # file with SHA256 hash sha256. - outputHashMode = "flat"; - outputHashAlgo = "sha256"; - outputHash = sha256; - - inherit url; - } - ``` - - The `outputHash` attribute must be a string containing the hash in either hexadecimal or "nix32" encoding, or following the format for integrity metadata as defined by [SRI](https://www.w3.org/TR/SRI/). - The "nix32" encoding is an adaptation of base-32 encoding. 
- The [`convertHash`](@docroot@/language/builtins.md#builtins-convertHash) function shows how to convert between different encodings, and the [`nix-hash` command](../command-ref/nix-hash.md) has information about obtaining the hash for some contents, as well as converting to and from encodings. - - The `outputHashAlgo` attribute specifies the hash algorithm used to compute the hash. - It can currently be `"sha1"`, `"sha256"`, `"sha512"`, or `null`. - `outputHashAlgo` can only be `null` when `outputHash` follows the SRI format. - - The `outputHashMode` attribute determines how the hash is computed. - It must be one of the following values: - - - [`"flat"`](@docroot@/store/store-object/content-address.md#method-flat) - - This is the default. - - - [`"recursive"` or `"nar"`](@docroot@/store/store-object/content-address.md#method-nix-archive) - - > **Compatibility** - > - > `"recursive"` is the traditional way of indicating this, - > and is supported since 2005 (virtually the entire history of Nix). - > `"nar"` is more clear, and consistent with other parts of Nix (such as the CLI), - > however support for it is only added in Nix version 2.21. - - - [`"text"`](@docroot@/store/store-object/content-address.md#method-text) - - > **Warning** - > - > The use of this method for derivation outputs is part of the [`dynamic-derivations`][xp-feature-dynamic-derivations] experimental feature. - - - [`"git"`](@docroot@/store/store-object/content-address.md#method-git) - - > **Warning** - > - > This method is part of the [`git-hashing`][xp-feature-git-hashing] experimental feature. - - - [`__contentAddressed`]{#adv-attr-__contentAddressed} - - > **Warning** - > This attribute is part of an [experimental feature](@docroot@/development/experimental-features.md). - > - > To use this attribute, you must enable the - > [`ca-derivations`][xp-feature-ca-derivations] experimental feature. 
As discussed in [Derivation Outputs and Types of Derivations](@docroot@/store/derivation/outputs/index.md), there are multiple kinds of derivations / kinds of derivation outputs.
Specifying one without the other is an error (unless [`outputHash`] is also specified and includes its own hash algorithm as described below).
This specifies the hash algorithm used to digest the [file system object] data of a content-addressing derivation output. + +    This works in conjunction with [`outputHashMode`](#adv-attr-outputHashMode). +    Specifying one without the other is an error (unless [`outputHash`] is also specified and includes its own hash algorithm as described below). + +    The `outputHashAlgo` attribute specifies the hash algorithm used to compute the hash. +    It can currently be `"blake3"`, `"sha1"`, `"sha256"`, `"sha512"`, or `null`. + +    `outputHashAlgo` can only be `null` when `outputHash` follows the SRI format, because in that case the choice of hash algorithm is determined by `outputHash`. + +  - [`outputHash`]{#adv-attr-outputHash}\
It determines whether the derivation is a floating content-addressing derivation.
See [derivation outputs](@docroot@/store/derivation/index.md#outputs) for what this affects.
> **Example** > @@ -68,7 +64,7 @@ It outputs an attribute set, and produces a [store derivation] as a side effect - [`builder`]{#attr-builder} ([Path](@docroot@/language/types.md#type-path) | [String](@docroot@/language/types.md#type-string)) - Path to an executable that will perform the build. + See [builder](@docroot@/store/derivation/index.md#builder). > **Example** > @@ -117,7 +113,7 @@ It outputs an attribute set, and produces a [store derivation] as a side effect Default: `[ ]` - Command-line arguments to be passed to the [`builder`](#attr-builder) executable. + See [args](@docroot@/store/derivation/index.md#args). > **Example** > @@ -239,77 +235,3 @@ It outputs an attribute set, and produces a [store derivation] as a side effect passed as an empty string. - -## Builder execution - -The [`builder`](#attr-builder) is executed as follows: - -- A temporary directory is created under the directory specified by - `TMPDIR` (default `/tmp`) where the build will take place. The - current directory is changed to this directory. - -- The environment is cleared and set to the derivation attributes, as - specified above. - -- In addition, the following variables are set: - - - `NIX_BUILD_TOP` contains the path of the temporary directory for - this build. - - - Also, `TMPDIR`, `TEMPDIR`, `TMP`, `TEMP` are set to point to the - temporary directory. This is to prevent the builder from - accidentally writing temporary files anywhere else. Doing so - might cause interference by other processes. - - - `PATH` is set to `/path-not-set` to prevent shells from - initialising it to their built-in default value. - - - `HOME` is set to `/homeless-shelter` to prevent programs from - using `/etc/passwd` or the like to find the user's home - directory, which could cause impurity. Usually, when `HOME` is - set, it is used as the location of the home directory, even if - it points to a non-existent path. 
- - - `NIX_STORE` is set to the path of the top-level Nix store - directory (typically, `/nix/store`). - - - `NIX_ATTRS_JSON_FILE` & `NIX_ATTRS_SH_FILE` if `__structuredAttrs` - is set to `true` for the derivation. A detailed explanation of this - behavior can be found in the - [section about structured attrs](./advanced-attributes.md#adv-attr-structuredAttrs). - - - For each output declared in `outputs`, the corresponding - environment variable is set to point to the intended path in the - Nix store for that output. Each output path is a concatenation - of the cryptographic hash of all build inputs, the `name` - attribute and the output name. (The output name is omitted if - it’s `out`.) - -- If an output path already exists, it is removed. Also, locks are - acquired to prevent multiple Nix instances from performing the same - build at the same time. - -- A log of the combined standard output and error is written to - `/nix/var/log/nix`. - -- The builder is executed with the arguments specified by the - attribute `args`. If it exits with exit code 0, it is considered to - have succeeded. - -- The temporary directory is removed (unless the `-K` option was - specified). - -- If the build was successful, Nix scans each output path for - references to input paths by looking for the hash parts of the input - paths. Since these are potential runtime dependencies, Nix registers - them as dependencies of the output paths. - -- After the build, Nix sets the last-modified timestamp on all files - in the build result to 1 (00:00:01 1/1/1970 UTC), sets the group to - the default group, and sets the mode of the file to 0444 or 0555 - (i.e., read-only, with execute permission enabled if the file was - originally executable). Note that possible `setuid` and `setgid` - bits are cleared. Setuid and setgid programs are not currently - supported by Nix. 
This is because the Nix archives used in - deployment have no concept of ownership information, and because it - makes the build result dependent on the user performing the build. diff --git a/doc/manual/source/language/import-from-derivation.md b/doc/manual/source/language/import-from-derivation.md index e901f5bcf..f161c6fe3 100644 --- a/doc/manual/source/language/import-from-derivation.md +++ b/doc/manual/source/language/import-from-derivation.md @@ -71,8 +71,9 @@ Boxes are data structures, arrow labels are transformations. | evaluate | | | | | | | | | V | | | -| .------------. | | .------------------. | -| | derivation |----|-instantiate-|->| store derivation | | +| .------------. | | | +| | derivation | | | .------------------. | +| | expression |----|-instantiate-|->| store derivation | | | '------------' | | '------------------' | | | | | | | | | realise | diff --git a/doc/manual/source/language/string-interpolation.md b/doc/manual/source/language/string-interpolation.md index 27780dcbb..a503d5f04 100644 --- a/doc/manual/source/language/string-interpolation.md +++ b/doc/manual/source/language/string-interpolation.md @@ -22,9 +22,9 @@ Rather than writing "--with-freetype2-library=" + freetype + "/lib" ``` -(where `freetype` is a [derivation]), you can instead write +(where `freetype` is a [derivation expression]), you can instead write -[derivation]: @docroot@/glossary.md#gloss-derivation +[derivation expression]: @docroot@/glossary.md#gloss-derivation-expression ```nix "--with-freetype2-library=${freetype}/lib" @@ -148,7 +148,7 @@ An expression that is interpolated must evaluate to one of the following: - `__toString` must be a function that takes the attribute set itself and returns a string - `outPath` must be a string - This includes [derivations](./derivations.md) or [flake inputs](@docroot@/command-ref/new-cli/nix3-flake.md#flake-inputs) (experimental). 
When a derivation is encoded to a [store object], we make the following choices: + +- The store path name is the derivation name with `.drv` suffixed at the end + +  Indeed, the ATerm format above does *not* contain the name of the derivation, on the assumption that a store path will also be provided out-of-band. + +- The derivation is content-addressed using the ["Text" method] of content-addressing derivations + +Currently we always encode derivations to store objects using the ATerm format (and the previous two choices), +but we reserve the option to encode new sorts of derivations differently in the future.
+ +[store derivation]: @docroot@/glossary.md#gloss-store-derivation +[store object]: @docroot@/glossary.md#gloss-store-object +["Text" method]: @docroot@/store/store-object/content-address.md#method-text diff --git a/doc/manual/source/protocols/json/derivation.md b/doc/manual/source/protocols/json/derivation.md index 6af7c0dfb..414a862e8 100644 --- a/doc/manual/source/protocols/json/derivation.md +++ b/doc/manual/source/protocols/json/derivation.md @@ -32,6 +32,7 @@ is a JSON object with the following fields: For an output which will be [content addresed], the name of the hash algorithm used. Valid algorithm strings are: + - `blake3` - `md5` - `sha1` - `sha256` diff --git a/doc/manual/source/protocols/json/store-object-info.md b/doc/manual/source/protocols/json/store-object-info.md index fee415eef..4b029c40b 100644 --- a/doc/manual/source/protocols/json/store-object-info.md +++ b/doc/manual/source/protocols/json/store-object-info.md @@ -35,10 +35,10 @@ In other words, the same store object residing in different store could have dif * `deriver`: - If known, the path to the [derivation] from which this store object was produced. + If known, the path to the [store derivation] from which this store object was produced. Otherwise `null`. - [derivation]: @docroot@/glossary.md#gloss-store-derivation + [store derivation]: @docroot@/glossary.md#gloss-store-derivation * `registrationTime` (optional): diff --git a/doc/manual/source/protocols/store-path.md b/doc/manual/source/protocols/store-path.md index 8ec6f8201..9abd83f4f 100644 --- a/doc/manual/source/protocols/store-path.md +++ b/doc/manual/source/protocols/store-path.md @@ -53,7 +53,7 @@ where method of content addressing store objects, if the hash algorithm is [SHA-256]. Just like in the "Text" case, we can have the store objects referenced by their paths. - Additionally, we can have an optional `:self` label to denote self reference. + Additionally, we can have an optional `:self` label to denote self-reference. 
- ```ebnf | "output:" id diff --git a/doc/manual/source/release-notes/rl-0.8.md b/doc/manual/source/release-notes/rl-0.8.md index 626c0c92b..5ba6e0e72 100644 --- a/doc/manual/source/release-notes/rl-0.8.md +++ b/doc/manual/source/release-notes/rl-0.8.md @@ -39,29 +39,29 @@ Nix 0.8 has the following improvements: notion of “closure store expressions” is gone (and so is the notion of “successors”); the file system references of a store path are now just stored in the database. - + For instance, given any store path, you can query its closure: - + $ nix-store -qR $(which firefox) ... lots of paths ... - + Also, Nix now remembers for each store path the derivation that built it (the “deriver”): - + $ nix-store -qR $(which firefox) /nix/store/4b0jx7vq80l9aqcnkszxhymsf1ffa5jd-firefox-1.0.1.drv - + So to see the build-time dependencies, you can do - + $ nix-store -qR $(nix-store -qd $(which firefox)) - + or, in a nicer format: - + $ nix-store -q --tree $(nix-store -qd $(which firefox)) - + File system references are also stored in reverse. For instance, you can query all paths that directly or indirectly use a certain Glibc: - + $ nix-store -q --referrers-closure \ /nix/store/8lz9yc6zgmc0vlqmn2ipcpkjlmbi51vv-glibc-2.3.4 @@ -92,28 +92,28 @@ Nix 0.8 has the following improvements: - `nix-channel` has new operations `--list` and `--remove`. - New ways of installing components into user environments: - + - Copy from another user environment: - + $ nix-env -i --from-profile .../other-profile firefox - + - Install a store derivation directly (bypassing the Nix expression language entirely): - + $ nix-env -i /nix/store/z58v41v21xd3...-aterm-2.3.1.drv - + (This is used to implement `nix-install-package`, which is therefore immune to evolution in the Nix expression language.) 
- + - Install an already built store path directly: - + $ nix-env -i /nix/store/hsyj5pbn0d9i...-aterm-2.3.1 - + - Install the result of a Nix expression specified as a command-line argument: - + $ nix-env -f .../i686-linux.nix -i -E 'x: x.firefoxWrapper' - + The difference with the normal installation mode is that `-E` does not use the `name` attributes of derivations. Therefore, this can be used to disambiguate multiple derivations with the @@ -127,7 +127,7 @@ Nix 0.8 has the following improvements: - Implemented a concurrent garbage collector. It is now always safe to run the garbage collector, even if other Nix operations are happening simultaneously. - + However, there can still be GC races if you use `nix-instantiate` and `nix-store --realise` directly to build things. To prevent races, use the @@ -147,13 +147,13 @@ Nix 0.8 has the following improvements: - The behaviour of the garbage collector can be changed globally by setting options in `/nix/etc/nix/nix.conf`. - + - `gc-keep-derivations` specifies whether deriver links should be followed when searching for live paths. - + - `gc-keep-outputs` specifies whether outputs of derivations should be followed when searching for live paths. - + - `env-keep-derivations` specifies whether user environments should store the paths of derivations when they are added (thus keeping the derivations alive). diff --git a/doc/manual/source/release-notes/rl-2.0.md b/doc/manual/source/release-notes/rl-2.0.md index 9f6d4aa83..aad0de211 100644 --- a/doc/manual/source/release-notes/rl-2.0.md +++ b/doc/manual/source/release-notes/rl-2.0.md @@ -8,13 +8,13 @@ The following incompatible changes have been made: It has been superseded by the binary cache substituter mechanism since several years. 
As a result, the following programs have been removed: - + - `nix-pull` - + - `nix-generate-patches` - + - `bsdiff` - + - `bspatch` - The “copy from other stores” substituter mechanism @@ -58,26 +58,26 @@ This release has the following new features: `nix-build`, `nix-shell -p`, `nix-env -qa`, `nix-instantiate --eval`, `nix-push` and `nix-copy-closure`. It has the following major features: - + - Unlike the legacy commands, it has a consistent way to refer to packages and package-like arguments (like store paths). For example, the following commands all copy the GNU Hello package to a remote machine: - + nix copy --to ssh://machine nixpkgs.hello - + nix copy --to ssh://machine /nix/store/0i2jd68mp5g6h2sa5k9c85rb80sn8hi9-hello-2.10 - + nix copy --to ssh://machine '(with import {}; hello)' - + By contrast, `nix-copy-closure` only accepted store paths as arguments. - + - It is self-documenting: `--help` shows all available command-line arguments. If `--help` is given after a subcommand, it shows examples for that subcommand. `nix --help-config` shows all configuration options. - + - It is much less verbose. By default, it displays a single-line progress indicator that shows how many packages are left to be built or downloaded, and (if there are running builds) the most @@ -85,7 +85,7 @@ This release has the following new features: last few lines of builder output. The full build log can be retrieved using `nix log`. - + - It [provides](https://github.com/NixOS/nix/commit/b8283773bd64d7da6859ed520ee19867742a03ba) all `nix.conf` configuration options as command line flags. For @@ -93,122 +93,122 @@ This release has the following new features: http-connections 100` you can write `--http-connections 100`. Boolean options can be written as `--foo` or `--no-foo` (e.g. `--no-auto-optimise-store`). - + - Many subcommands have a `--json` flag to write results to stdout in JSON format. 
- + > **Warning** - > + > > Please note that the `nix` command is a work in progress and the > interface is subject to change. - + It provides the following high-level (“porcelain”) subcommands: - + - `nix build` is a replacement for `nix-build`. - + - `nix run` executes a command in an environment in which the specified packages are available. It is (roughly) a replacement for `nix-shell -p`. Unlike that command, it does not execute the command in a shell, and has a flag (`-c`) that specifies the unquoted command line to be executed. - + It is particularly useful in conjunction with chroot stores, allowing Linux users who do not have permission to install Nix in `/nix/store` to still use binary substitutes that assume `/nix/store`. For example, - + nix run --store ~/my-nix nixpkgs.hello -c hello --greeting 'Hi everybody!' - + downloads (or if not substitutes are available, builds) the GNU Hello package into `~/my-nix/nix/store`, then runs `hello` in a mount namespace where `~/my-nix/nix/store` is mounted onto `/nix/store`. - + - `nix search` replaces `nix-env -qa`. It searches the available packages for occurrences of a search string in the attribute name, package name or description. Unlike `nix-env -qa`, it has a cache to speed up subsequent searches. - + - `nix copy` copies paths between arbitrary Nix stores, generalising `nix-copy-closure` and `nix-push`. - + - `nix repl` replaces the external program `nix-repl`. It provides an interactive environment for evaluating and building Nix expressions. Note that it uses `linenoise-ng` instead of GNU Readline. - + - `nix upgrade-nix` upgrades Nix to the latest stable version. This requires that Nix is installed in a profile. (Thus it won’t work on NixOS, or if it’s installed outside of the Nix store.) - + - `nix verify` checks whether store paths are unmodified and/or “trusted” (see below). It replaces `nix-store --verify` and `nix-store --verify-path`. - + - `nix log` shows the build log of a package or path. 
If the build log is not available locally, it will try to obtain it from the configured substituters (such as [cache.nixos.org](https://cache.nixos.org/), which now provides build logs). - + - `nix edit` opens the source code of a package in your editor. - + - `nix eval` replaces `nix-instantiate --eval`. - + - `nix why-depends` shows why one store path has another in its closure. This is primarily useful to finding the causes of closure bloat. For example, - + nix why-depends nixpkgs.vlc nixpkgs.libdrm.dev - + shows a chain of files and fragments of file contents that cause the VLC package to have the “dev” output of `libdrm` in its closure — an undesirable situation. - + - `nix path-info` shows information about store paths, replacing `nix-store -q`. A useful feature is the option `--closure-size` (`-S`). For example, the following command show the closure sizes of every path in the current NixOS system closure, sorted by size: - + nix path-info -rS /run/current-system | sort -nk2 - + - `nix optimise-store` replaces `nix-store --optimise`. The main difference is that it has a progress indicator. - + A number of low-level (“plumbing”) commands are also available: - + - `nix ls-store` and `nix ls-nar` list the contents of a store path or NAR file. The former is primarily useful in conjunction with remote stores, e.g. - + nix ls-store --store https://cache.nixos.org/ -lR /nix/store/0i2jd68mp5g6h2sa5k9c85rb80sn8hi9-hello-2.10 - + lists the contents of path in a binary cache. - + - `nix cat-store` and `nix cat-nar` allow extracting a file from a store path or NAR file. - + - `nix dump-path` writes the contents of a store path to stdout in NAR format. This replaces `nix-store --dump`. - + - `nix show-derivation` displays a store derivation in JSON format. This is an alternative to `pp-aterm`. - + - `nix add-to-store` replaces `nix-store --add`. - + - `nix sign-paths` signs store paths. - + - `nix copy-sigs` copies signatures from one store to another. 
- + - `nix show-config` shows all configuration options and their current values. @@ -224,11 +224,11 @@ This release has the following new features: `nix-copy-closure`, `nix-push` and substitution are all instances of the general notion of copying paths between different kinds of Nix stores. - + Stores are specified using an URI-like syntax, e.g. or . The following store types are supported: - + - `LocalStore` (stori URI `local` or an absolute path) and the misnamed `RemoteStore` (`daemon`) provide access to a local Nix store, the latter via the Nix daemon. You can use `auto` or the @@ -236,63 +236,63 @@ This release has the following new features: whether you have write permission to the Nix store. It is no longer necessary to set the `NIX_REMOTE` environment variable to use the Nix daemon. - + As noted above, `LocalStore` now supports chroot builds, allowing the “physical” location of the Nix store (e.g. `/home/alice/nix/store`) to differ from its “logical” location (typically `/nix/store`). This allows non-root users to use Nix while still getting the benefits from prebuilt binaries from [cache.nixos.org](https://cache.nixos.org/). - + - `BinaryCacheStore` is the abstract superclass of all binary cache stores. It supports writing build logs and NAR content listings in JSON format. - + - `HttpBinaryCacheStore` (`http://`, `https://`) supports binary caches via HTTP or HTTPS. If the server supports `PUT` requests, it supports uploading store paths via commands such as `nix copy`. - + - `LocalBinaryCacheStore` (`file://`) supports binary caches in the local filesystem. - + - `S3BinaryCacheStore` (`s3://`) supports binary caches stored in Amazon S3, if enabled at compile time. - + - `LegacySSHStore` (`ssh://`) is used to implement remote builds and `nix-copy-closure`. - + - `SSHStore` (`ssh-ng://`) supports arbitrary Nix operations on a remote machine via the same protocol used by `nix-daemon`. 
- Security has been improved in various ways: - + - Nix now stores signatures for local store paths. When paths are copied between stores (e.g., copied from a binary cache to a local store), signatures are propagated. - + Locally-built paths are signed automatically using the secret keys specified by the `secret-key-files` store option. Secret/public key pairs can be generated using `nix-store --generate-binary-cache-key`. - + In addition, locally-built store paths are marked as “ultimately trusted”, but this bit is not propagated when paths are copied between stores. - + - Content-addressable store paths no longer require signatures — they can be imported into a store by unprivileged users even if they lack signatures. - + - The command `nix verify` checks whether the specified paths are trusted, i.e., have a certain number of trusted signatures, are ultimately trusted, or are content-addressed. - + - Substitutions from binary caches [now](https://github.com/NixOS/nix/commit/ecbc3fedd3d5bdc5a0e1a0a51b29062f2874ac8b) require signatures by default. This was already the case on NixOS. - + - In Linux sandbox builds, we [now](https://github.com/NixOS/nix/commit/eba840c8a13b465ace90172ff76a0db2899ab11b) use `/build` instead of `/tmp` as the temporary build directory. @@ -309,7 +309,7 @@ This release has the following new features: hash or commit hash is specified. For example, calls to `builtins.fetchGit` are only allowed if a `rev` attribute is specified. - + The goal of this feature is to enable true reproducibility and traceability of builds (including NixOS system configurations) at the evaluation level. For example, in the future, `nixos-rebuild` @@ -367,21 +367,21 @@ This release has the following new features: log will be shown if a build fails. - Networking has been improved: - + - HTTP/2 is now supported. This makes binary cache lookups [much more efficient](https://github.com/NixOS/nix/commit/90ad02bf626b885a5dd8967894e2eafc953bdf92). 
We now retry downloads on many HTTP errors, making binary cache substituters more resilient to temporary failures.
- + [As a convenience to Bash builders](https://github.com/NixOS/nix/commit/2d5b1b24bf70a498e4c0b378704cfdb6471cc699), Nix writes a script named `.attrs.sh` to the builder’s directory diff --git a/doc/manual/source/release-notes/rl-2.27.md b/doc/manual/source/release-notes/rl-2.27.md new file mode 100644 index 000000000..b4918029a --- /dev/null +++ b/doc/manual/source/release-notes/rl-2.27.md @@ -0,0 +1,66 @@ +# Release 2.27.0 (2025-03-03) + +- `inputs.self.submodules` flake attribute [#12421](https://github.com/NixOS/nix/pull/12421) + + Flakes in Git repositories can now declare that they need Git submodules to be enabled: + ``` + { + inputs.self.submodules = true; + } + ``` + Thus, it's no longer needed for the caller of the flake to pass `submodules = true`. + +- Git LFS support [#10153](https://github.com/NixOS/nix/pull/10153) [#12468](https://github.com/NixOS/nix/pull/12468) + + The Git fetcher now supports Large File Storage (LFS). This can be enabled by passing the attribute `lfs = true` to the fetcher, e.g. + ```console + nix flake prefetch 'git+ssh://git@github.com/Apress/repo-with-large-file-storage.git?lfs=1' + ``` + + A flake can also declare that it requires LFS to be enabled: + ``` + { + inputs.self.lfs = true; + } + ``` + + Author: [**@b-camacho**](https://github.com/b-camacho), [**@kip93**](https://github.com/kip93) + +- Handle the case where a chroot store is used and some inputs are in the "host" `/nix/store` [#12512](https://github.com/NixOS/nix/pull/12512) + + The evaluator now presents a "union" filesystem view of the `/nix/store` in the host and the chroot. + + This change also removes some hacks that broke `builtins.{path,filterSource}` in chroot stores [#11503](https://github.com/NixOS/nix/issues/11503). 
+ +- `nix flake prefetch` now has a `--out-link` option [#12443](https://github.com/NixOS/nix/pull/12443) + +- Set `FD_CLOEXEC` on sockets created by curl [#12439](https://github.com/NixOS/nix/pull/12439) + + Curl created sockets without setting `FD_CLOEXEC`/`SOCK_CLOEXEC`. This could previously cause connections to remain open forever when using commands like `nix shell`. This change sets the `FD_CLOEXEC` flag using a `CURLOPT_SOCKOPTFUNCTION` callback. + +# Contributors + +This release was made possible by the following 21 contributors: + +- Aiden Fox Ivey [**(@aidenfoxivey)**](https://github.com/aidenfoxivey) +- Ben Millwood [**(@bmillwood)**](https://github.com/bmillwood) +- Brian Camacho [**(@b-camacho)**](https://github.com/b-camacho) +- Brian McKenna [**(@puffnfresh)**](https://github.com/puffnfresh) +- Eelco Dolstra [**(@edolstra)**](https://github.com/edolstra) +- Fabian Möller [**(@B4dM4n)**](https://github.com/B4dM4n) +- Illia Bobyr [**(@ilya-bobyr)**](https://github.com/ilya-bobyr) +- Ivan Trubach [**(@tie)**](https://github.com/tie) +- John Ericson [**(@Ericson2314)**](https://github.com/Ericson2314) +- Jörg Thalheim [**(@Mic92)**](https://github.com/Mic92) +- Leandro Emmanuel Reina Kiperman [**(@kip93)**](https://github.com/kip93) +- MaxHearnden [**(@MaxHearnden)**](https://github.com/MaxHearnden) +- Philipp Otterbein +- Robert Hensing [**(@roberth)**](https://github.com/roberth) +- Sandro [**(@SuperSandro2000)**](https://github.com/SuperSandro2000) +- Sergei Zimmerman [**(@xokdvium)**](https://github.com/xokdvium) +- Silvan Mosberger [**(@infinisil)**](https://github.com/infinisil) +- Someone [**(@SomeoneSerge)**](https://github.com/SomeoneSerge) +- Steve Walker [**(@stevalkr)**](https://github.com/stevalkr) +- bcamacho2 [**(@bcamacho2)**](https://github.com/bcamacho2) +- silvanshade [**(@silvanshade)**](https://github.com/silvanshade) +- tomberek [**(@tomberek)**](https://github.com/tomberek) diff --git a/doc/manual/source/store/building.md 
b/doc/manual/source/store/building.md new file mode 100644 index 000000000..dbfe6b5ca --- /dev/null +++ b/doc/manual/source/store/building.md @@ -0,0 +1,100 @@ +# Building + +## Normalizing derivation inputs + +- Each input must be [realised] prior to building the derivation in question. + +[realised]: @docroot@/glossary.md#gloss-realise + +- Once this is done, the derivation is *normalized*, replacing each input deriving path with its store path, which we now know from realising the input. + +## Builder Execution + +The [`builder`](./derivation/index.md#builder) is executed as follows: + +- A temporary directory is created under the directory specified by + `TMPDIR` (default `/tmp`) where the build will take place. The + current directory is changed to this directory. + +- The environment is cleared and set to the derivation attributes, as + specified above. + +- In addition, the following variables are set: + + - `NIX_BUILD_TOP` contains the path of the temporary directory for + this build. + + - Also, `TMPDIR`, `TEMPDIR`, `TMP`, `TEMP` are set to point to the + temporary directory. This is to prevent the builder from + accidentally writing temporary files anywhere else. Doing so + might cause interference by other processes. + + - `PATH` is set to `/path-not-set` to prevent shells from + initialising it to their built-in default value. + + - `HOME` is set to `/homeless-shelter` to prevent programs from + using `/etc/passwd` or the like to find the user's home + directory, which could cause impurity. Usually, when `HOME` is + set, it is used as the location of the home directory, even if + it points to a non-existent path. + + - `NIX_STORE` is set to the path of the top-level Nix store + directory (typically, `/nix/store`). + + - `NIX_ATTRS_JSON_FILE` & `NIX_ATTRS_SH_FILE` if `__structuredAttrs` + is set to `true` for the derivation. 
A detailed explanation of this + behavior can be found in the + [section about structured attrs](@docroot@/language/advanced-attributes.md#adv-attr-structuredAttrs). + + - For each output declared in `outputs`, the corresponding + environment variable is set to point to the intended path in the + Nix store for that output. Each output path is a concatenation + of the cryptographic hash of all build inputs, the `name` + attribute and the output name. (The output name is omitted if + it’s `out`.) + +- If an output path already exists, it is removed. Also, locks are + acquired to prevent multiple [Nix instances][Nix instance] from performing the same + build at the same time. + +- A log of the combined standard output and error is written to + `/nix/var/log/nix`. + +- The builder is executed with the arguments specified by the + attribute `args`. If it exits with exit code 0, it is considered to + have succeeded. + +- The temporary directory is removed (unless the `-K` option was + specified). + +## Processing outputs + +If the builder exited successfully, the following steps happen in order to turn the output directories left behind by the builder into proper store objects: + +- **Normalize the file permissions** + + Nix sets the last-modified timestamp on all files + in the build result to 1 (00:00:01 1/1/1970 UTC), sets the group to + the default group, and sets the mode of the file to 0444 or 0555 + (i.e., read-only, with execute permission enabled if the file was + originally executable). Any possible `setuid` and `setgid` + bits are cleared. + + > **Note** + > + > Setuid and setgid programs are not currently supported by Nix. + > This is because the Nix archives used in deployment have no concept of ownership information, + > and because it makes the build result dependent on the user performing the build. + +- **Calculate the references** + + Nix scans each output path for + references to input paths by looking for the hash parts of the input + paths. 
Since these are potential runtime dependencies, Nix registers + them as dependencies of the output paths. + + Nix also scans for references to other outputs' paths in the same way, because outputs are allowed to refer to each other. + If the outputs' references to each other form a cycle, this is an error, because the references of store objects much be acyclic. + + +[Nix instance]: @docroot@/glossary.md#gloss-nix-instance diff --git a/doc/manual/source/store/derivation/index.md b/doc/manual/source/store/derivation/index.md new file mode 100644 index 000000000..911c28485 --- /dev/null +++ b/doc/manual/source/store/derivation/index.md @@ -0,0 +1,302 @@ +# Store Derivation and Deriving Path + +Besides functioning as a [content-addressed store], the Nix store layer works as a [build system]. +Other systems (like Git or IPFS) also store and transfer immutable data, but they don't concern themselves with *how* that data was created. + +This is where Nix distinguishes itself. +*Derivations* represent individual build steps, and *deriving paths* are needed to refer to the *outputs* of those build steps before they are built. + + +## Store Derivation {#store-derivation} + +A derivation is a specification for running an executable on precisely defined input to produce on more [store objects][store object]. +These store objects are known as the derivation's *outputs*. + +Derivations are *built*, in which case the process is spawned according to the spec, and when it exits, required to leave behind files which will (after post-processing) become the outputs of the derivation. +This process is described in detail in [Building](@docroot@/store/building.md). + + + +A derivation consists of: + + - A name + + - An [inputs specification][inputs], a set of [deriving paths][deriving path] + + - An [outputs specification][outputs], specifying which outputs should be produced, and various metadata about them. + + - The ["system" type][system] (e.g. 
`x86_64-linux`) where the executable is to run. + + - The [process creation fields]: to spawn the arbitrary process which will perform the build step. + +[store derivation]: #store-derivation +[inputs]: #inputs +[input]: #inputs +[outputs]: ./outputs/index.md +[output]: ./outputs/index.md +[process creation fields]: #process-creation-fields +[builder]: #builder +[args]: #args +[env]: #env +[system]: #system +[content-addressed store]: @docroot@/glossary.md#gloss-content-addressed-store +[build system]: @docroot@/glossary.md#gloss-build-system + +### Referencing derivations {#derivation-path} + +Derivations are always referred to by the [store path] of the store object they are encoded to. +See the [encoding section](#derivation-encoding) for more details on how this encoding works, and thus what exactly what store path we would end up with for a given derivation. + +The store path of the store object which encodes a derivation is often called a *derivation path* for brevity. + +## Deriving path {#deriving-path} + +Deriving paths are a way to refer to [store objects][store object] that may or may not yet be [realised][realise]. +There are two forms: + +- [*constant*]{#deriving-path-constant}: just a [store path]. + It can be made [valid][validity] by copying it into the store: from the evaluator, command line interface or another store. + +- [*output*]{#deriving-path-output}: a pair of a [store path] to a [store derivation] and an [output] name. + +In pseudo code: + +```typescript +type OutputName = String; + +type ConstantPath = { + path: StorePath; +}; + +type OutputPath = { + drvPath: StorePath; + output: OutputName; +}; + +type DerivingPath = ConstantPath | OutputPath; +``` + +Deriving paths are necessary because, in general and particularly for [content-addressing derivations][content-addressing derivation], the [store path] of an [output] is not known in advance. 
+We can use an output deriving path to refer to such an output, instead of the store path which we do not yet know. + +[deriving path]: #deriving-path +[validity]: @docroot@/glossary.md#gloss-validity + +## Parts of a derivation + +A derivation is constructed from the parts documented in the following subsections. + +### Inputs {#inputs} + +The inputs are a set of [deriving paths][deriving path], referring to all store objects needed in order to perform this build step. + +The [process creation fields] will presumably include many [store paths][store path]: + + - The path to the executable normally starts with a store path + - The arguments and environment variables likely contain many other store paths. + +But rather than somehow scanning all the other fields for inputs, Nix requires that all inputs be explicitly collected in the inputs field. It is instead the responsibility of the creator of a derivation (e.g. the evaluator) to ensure that every store object referenced in another field (e.g. referenced by store path) is included in this inputs field. + +### System {#system} + +The system type on which the [`builder`](#attr-builder) executable is meant to be run. + +A necessary condition for Nix to schedule a given derivation on some [Nix instance] is for the "system" of that derivation to match that instance's [`system` configuration option] or [`extra-platforms` configuration option]. + +By putting the `system` in each derivation, Nix allows *heterogenous* build plans, where not all steps can be run on the same machine or same sort of machine. +Nix can schedule builds such that it automatically builds on other platforms by [forwarding build requests](@docroot@/advanced-topics/distributed-builds.md) to other Nix instances. 
+ +[`system` configuration option]: @docroot@/command-ref/conf-file.md#conf-system +[`extra-platforms` configuration option]: @docroot@/command-ref/conf-file.md#conf-extra-platforms + +[content-addressing derivation]: @docroot@/glossary.md#gloss-content-addressing-derivation +[realise]: @docroot@/glossary.md#gloss-realise +[store object]: @docroot@/store/store-object.md +[store path]: @docroot@/store/store-path.md + +### Process creation fields {#process-creation-fields} + +These are the three fields which describe how to spawn the process which (along with any of its own child processes) will perform the build. +You may note that this has everything needed for an `execve` system call. + +#### Builder {#builder} + +This is the path to an executable that will perform the build and produce the [outputs]. + +#### Arguments {#args} + +Command-line arguments to be passed to the [`builder`](#builder) executable. + +Note that these are the arguments after the first argument. +The first argument passed to the `builder` will be the value of `builder`, as per the usual convention on Unix. +See [Wikipedia](https://en.wikipedia.org/wiki/Argv) for details. + +#### Environment Variables {#env} + +Environment variables which will be passed to the [builder](#builder) executable. + +### Placeholders + +Placeholders are opaque values used within the [process creation fields] to [store objects] for which we don't yet know [store path]s. +They are strings in the form `/` that are embedded anywhere within the strings of those fields, and we are [considering](https://github.com/NixOS/nix/issues/12361) to add store-path-like placeholders. + +> **Note** +> +> Output Deriving Path exist to solve the same problem as placeholders --- that is, referring to store objects for which we don't yet know a store path. +> They also have a string syntax with `^`, [described in the encoding section](#deriving-path-encoding). 
+> We could use that syntax instead of `/` for placeholders, but its human-legibility would cause problems. + +There are two types of placeholder, corresponding to the two cases where this problem arises: + +- [Output placeholder]{#output-placeholder}: + + This is a placeholder for a derivation's own output. + +- [Input placeholder]{#input-placeholder}: + + This is a placeholder to a derivation's non-constant [input], + i.e. an input that is an [output derived path]. + +> **Explanation** +> +> In general, we need to realise [realise] a [store object] in order to be sure to have a store object for it. +> But for these two cases this is either impossible or impractical: +> +> - In the output case this is impossible: +> +> We cannot build the output until we have a correct derivation, and we cannot have a correct derivation (without using placeholders) until we have the output path. +> +> - In the input case this is impractical: +> +> If we always build a dependency first, and then refer to its output by store path, we would lose the ability for a derivation graph to describe an entire build plan consisting of multiple build steps. + +## Encoding + +### Derivation {#derivation-encoding} + +There are two formats, documented separately: + +- The legacy ["ATerm" format](@docroot@/protocols/derivation-aterm.md) + +- The experimental, currently under development and changing [JSON format](@docroot@/protocols/json/derivation.md) + +Every derivation has a canonical choice of encoding used to serialize it to a store object. +This ensures that there is a canonical [store path] used to refer to the derivation, as described in [Referencing derivations](#derivation-path). + +> **Note** +> +> Currently, the canonical encoding for every derivation is the "ATerm" format, +> but this is subject to change for types derivations which are not yet stable. + +Regardless of the format used, when serializing a derivation to a store object, that store object will be content-addressed. 
+ +In the common case, the inputs to store objects are either: + + - [constant deriving paths](#deriving-path-constant) for content-addressed source objects, which are "initial inputs" rather than the outputs of some other derivation + + - the outputs of other derivations + +If those other derivations *also* abide by this common case (and likewise for transitive inputs), then the entire closure of the serialized derivation will be content-addressed. + +### Deriving Path {#deriving-path-encoding} + +- *constant* + + Constant deriving paths are encoded simply as the underlying store path is. + Thus, we see that every encoded store path is also a valid encoded (constant) deriving path. + +- *output* + + Output deriving paths are encoded by + + - encoding of a store path referring to a derivation + + - a `^` separator (or `!` in some legacy contexts) + + - the name of an output of the previously referred derivation + + > **Example** + > + > ``` + > /nix/store/lxrn8v5aamkikg6agxwdqd1jz7746wz4-firefox-98.0.2.drv^out + > ``` + > + > This parses like so: + > + > ``` + > /nix/store/lxrn8v5aamkikg6agxwdqd1jz7746wz4-firefox-98.0.2.drv^out + > |------------------------------------------------------------| |-| + > store path (usual encoding) output name + > |--| + > note the ".drv" + > ``` + +## Extending the model to be higher-order + +**Experimental feature**: [`dynamic-derivations`](@docroot@/development/experimental-features.md#xp-feature-dynamic-derivations) + +So far, we have used store paths to refer to derivations. +That works because we've implicitly assumed that all derivations are created *statically* --- created by some mechanism out of band, and then manually inserted into the store. +But what if derivations could also be created dynamically within Nix? +In other words, what if derivations could be the outputs of other derivations? 
+ +> **Note** +> +> In the parlance of "Build Systems à la carte", we are generalizing the Nix store layer to be a "Monadic" instead of "Applicative" build system. + +How should we refer to such derivations? +A deriving path works, the same as how we refer to other derivation outputs. +But what about a dynamic derivations output? +(i.e. how do we refer to the output of a derivation, which is itself an output of a derivation?) +For that we need to generalize the definition of deriving path, replacing the store path used to refer to the derivation with a nested deriving path: + +```diff + type OutputPath = { +- drvPath: StorePath; ++ drvPath: DerivingPath; + output: OutputName; + }; +``` + +Now, the `drvPath` field of `OutputPath` is itself a `DerivingPath` instead of a `StorePath`. + +With that change, here is updated definition: + +```typescript +type OutputName = String; + +type ConstantPath = { + path: StorePath; +}; + +type OutputPath = { + drvPath: DerivingPath; + output: OutputName; +}; + +type DerivingPath = ConstantPath | OutputPath; +``` + +Under this extended model, `DerivingPath`s are thus inductively built up from a root `ConstantPath`, wrapped with zero or more outer `OutputPath`s. + +### Encoding {#deriving-path-encoding} + +The encoding is adjusted in the natural way, encoding the `drv` field recursively using the same deriving path encoding. +The result of this is that it is possible to have a chain of `^` at the end of the final string, as opposed to just a single one. 
+ +> **Example** +> +> ``` +> /nix/store/lxrn8v5aamkikg6agxwdqd1jz7746wz4-firefox-98.0.2.drv^foo.drv^bar.drv^out +> |----------------------------------------------------------------------------| |-| +> inner deriving path (usual encoding) output name +> |--------------------------------------------------------------------| |-----| +> even more inner deriving path (usual encoding) output name +> |------------------------------------------------------------| |-----| +> innermost constant store path (usual encoding) output name +> ``` + +[Nix instance]: @docroot@/glossary.md#gloss-nix-instance diff --git a/doc/manual/source/store/derivation/outputs/content-address.md b/doc/manual/source/store/derivation/outputs/content-address.md new file mode 100644 index 000000000..4539a5eba --- /dev/null +++ b/doc/manual/source/store/derivation/outputs/content-address.md @@ -0,0 +1,192 @@ +# Content-addressing derivation outputs + +The content-addressing of an output only depends on that store object itself, not any other information external (such has how it was made, when it was made, etc.). +As a consequence, a store object will be content-addressed the same way regardless of whether it was manually inserted into the store, outputted by some derivation, or outputted by a some other derivation. + +The output spec for a content-addressed output must contains the following field: + +- *method*: how the data of the store object is digested into a content address + +The possible choices of *method* are described in the [section on content-addressing store objects](@docroot@/store/store-object/content-address.md). +Given the method, the output's name (computed from the derivation name and output spec mapping as described above), and the data of the store object, the output's store path will be computed as described in that section. + +## Fixed-output content-addressing {#fixed} + +In this case the content address of the *fixed* in advanced by the derivation itself. 
In other words, when the derivation has finished [building](@docroot@/store/building.md), and the provisional output's content-address is computed as part of the process to turn it into a *bona fide* store object, the calculated content address must match that given in the derivation, or the build of that derivation will be deemed a failure.
+The outputs' guaranteed content-addresses are supposed to mitigate the risk of the builder being given these capabilities; +regardless of what the builder does *during* the build, it cannot influence downstream builds in unanticipated ways because all information it passed downstream flows through the outputs whose content-addresses are fixed. + +[builder]: @docroot@/store/derivation/index.md#builder + +In concrete terms, the purpose of this feature is fetching fixed input data like source code from the network. +For example, consider a family of "fetch URL" derivations. +These derivations download files from given URL. +To ensure that the downloaded file has not been modified, each derivation must also specify a cryptographic hash of the file. +For example, + +```jsonc +{ + "outputs: { + "out": { + "method": "nar", + "hashAlgo": "sha256", + "hash: "1md7jsfd8pa45z73bz1kszpp01yw6x5ljkjk2hx7wl800any6465", + }, + }, + "env": { + "url": "http://ftp.gnu.org/pub/gnu/hello/hello-2.1.1.tar.gz" + // ... + }, + // ... +} +``` + +It sometimes happens that the URL of the file changes, +e.g., because servers are reorganised or no longer available. +In these cases, we then must update the call to `fetchurl`, e.g., + +```diff + "env": { +- "url": "http://ftp.gnu.org/pub/gnu/hello/hello-2.1.1.tar.gz" ++ "url": "ftp://ftp.nluug.nl/pub/gnu/hello/hello-2.1.1.tar.gz" + // ... + }, +``` + +If a `fetchurl` derivation's outputs were [input-addressed][input addressing], the output paths of the derivation and of *all derivations depending on it* would change. +For instance, if we were to change the URL of the Glibc source distribution in Nixpkgs (a package on which almost all other packages depend on Linux) massive rebuilds would be needed. +This is unfortunate for a change which we know cannot have a real effect as it propagates upwards through the dependency graph. 
+ +For content-addressed outputs (fixed or floating), on the other hand, the outputs' store path only depends on the derivation's name, data, and the `method` of the outputs' specs. +The rest of the derivation is ignored for the purpose of computing the output path. + +> **History Note** +> +> Fixed content-addressing is especially important both today and historically as the *only* form of content-addressing that is stabilized. +> This is why the rationale above contrasts it with [input addressing]. + +## (Floating) Content-Addressing {#floating} + +> **Warning** +> This is part of an [experimental feature](@docroot@/development/experimental-features.md). +> +> To use this type of output addressing, you must enable the +> [`ca-derivations`][xp-feature-ca-derivations] experimental feature. +> For example, in [nix.conf](@docroot@/command-ref/conf-file.md) you could add: +> +> ``` +> extra-experimental-features = ca-derivations +> ``` + +With this experimemental feature enabled, derivation outputs can also be content-addressed *without* fixing in the output spec what the outputs' content address must be. + +### Purity + +Because the derivation output is not fixed (just like with [input addressing]), the [builder] is not given any impure capabilities [^purity]. + +> **Configuration note** +> +> Strictly speaking, the extent to which sandboxing and deprivilaging is possible varies with the environment Nix is running in. +> Nix's configuration settings indicate what level of sandboxing is required or enabled. +> Builds of derivations will fail if they request an absense of sandboxing which is not allowed. +> Builds of derivations will also fail if the level of sandboxing specified in the configure exceeds what is possible in teh given environment. +> +> (The "environment", in this case, consists of attributes such as the Operating System Nix runs atop, along with the operating-system-specific privilages that Nix has been granted. 
+> Because of how conventional operating systems like macos, Linux, etc. work, granting builders *fewer* privilages may ironically require that Nix be run with *more* privilages.) + +That said, derivations producing floating content-addressed outputs may declare their builders as impure (like the builders of derivations producing producing fixed outputs). +This is provisionally supported as part of the [`impure-derivations`][xp-feature-impure-derivations] experimental feature. + +### Compatibility negotiation + +Any derivation producing a floating content-addresssed output implicitly requires the `ca-derivations` [system feature](@docroot@/command-ref/conf-file.md#conf-system-features). +This prevents scheduling the building of the derivation on a machine without the experimental feature enabled. +Even once the experimental feature is stabilized, this is still useful in order to be allow using remote builder running odler versions of Nix, or alternative implementations that do not support floating content addressing. + +### Determinism + +In the earlier [discussion of how self-references are handled when content-addressing store objects](@docroot@/store/store-object/content-address.html#self-references), it was pointed out that methods of producing store objects ought to be deterministic regardless of the choice of provisional store path. +For store objects produced by manually inserting into the store to create a store object, the "method of production" is an informally concept --- formally, Nix has no idea where the store object came from, and content-addressing is crucial in order to ensure that the derivation is *intrinsically* tamper-proof. +But for store objects produced by derivation, the "method is quite formal" --- the whole point of derivations is to be a formal notion of building, after all. +In this case, we can elevate this informal property to a formal one. 
A *deterministic* content-addressing derivation should produce outputs with the same content addresses:
+> A stand-alone assertion object of this sort is not yet implemented, but its possible creation is tracked in [Issue #11955](https://github.com/NixOS/nix/issues/11955).
+>
+> In the current version of Nix, fixed outputs which fail their hash check are still registered as valid store objects, just not registered as outputs of the derivation which produced them.
+> This is an optimization that means if the wrong output hash is specified in a derivation, and then the derivation is recreated with the right output hash, the derivation does not need to be rebuilt --- avoiding downloading potentially large amounts of data twice.
+> This optimisation prefigures the design above:
+> If the output hash assertion was removed outside the derivation itself, Nix could additionally not only register that outputted store object like today, but could also make note that the derivation did in fact successfully download some data.
+> For example, for the "fetch URL" example above, making such a note is tantamount to recording what data is available at the time of download at the given URL.
+> It would only be when Nix subsequently tries to build something with that (refining our example) downloaded source code that Nix would be forced to check the output hash assertion, preventing it from e.g. building compromised malware.
+>
+> Recapping, Nix would
+>
+> 1. successfully download data
+> 2. insert that data into the store
+> 3. associate (presumably with some sort of expiration policy) the downloaded data with the derivation that downloaded it
+>
+> But only use the downloaded store object in subsequent derivations that depended upon the assertion if the assertion passed.
+>
+> This possible future extension is included to illustrate this distinction:
+
+[input addressing]: ./input-address.md
+[xp-feature-ca-derivations]: @docroot@/development/experimental-features.md#xp-feature-ca-derivations
+[xp-feature-git-hashing]: @docroot@/development/experimental-features.md#xp-feature-git-hashing
+[xp-feature-impure-derivations]: @docroot@/development/experimental-features.md#xp-feature-impure-derivations
diff --git a/doc/manual/source/store/derivation/outputs/index.md b/doc/manual/source/store/derivation/outputs/index.md
new file mode 100644
index 000000000..15070a18f
--- /dev/null
+++ b/doc/manual/source/store/derivation/outputs/index.md
@@ -0,0 +1,97 @@
+# Derivation Outputs and Types of Derivations
+
+As stated on the [main pages on derivations](../index.md#store-derivation),
+a derivation produces [store objects], which are known as the *outputs* of the derivation.
+Indeed, the entire point of derivations is to produce these outputs, and to reliably and reproducibly produce these outputs each time the derivation is run.
+
+One of the parts of a derivation is its *outputs specification*, which specifies certain information about the outputs the derivation produces when run.
+The outputs specification is a map, from names to specifications for individual outputs.
+
+## Output Names {#outputs}
+
+Output names can be any string which is also a valid [store path] name.
+The name mapped to each output specification is not actually the name of the output.
+In the general case, the output store object has name `derivationName + "-" + outputSpecName`, not any other metadata about it.
+However, an output spec named "out" describes an output store object whose name is just the derivation name.
+
+> **Example**
+>
+> A derivation is named `hello`, and has two outputs, `out`, and `dev`
+>
+> - The derivation's path will be: `/nix/store/<hash>-hello.drv`.
+>
+> - The store path of `out` will be: `/nix/store/<hash>-hello`.
+>
+> - The store path of `dev` will be: `/nix/store/<hash>-hello-dev`.
+
+The outputs of the derivation are the [store objects][store object] it is obligated to produce.
+
+> **Note**
+>
+> The formal terminology here is somewhat at odds with everyday communication in the Nix community today.
+> "output" in casual usage tends to refer either to the actual output store object, or the notional output spec, depending on context.
+>
+> For example "hello's `dev` output" means the store object referred to by the store path `/nix/store/<hash>-hello-dev`.
+> It is unusual to call this the "`hello-dev` output", even though `hello-dev` is the actual name of that store object.
+
+## Types of output addressing
+
+The main information contained in an output specification is how the derivation output is addressed.
+In particular, the specification decides:
+
+- whether the output is [content-addressed](./content-address.md) or [input-addressed](./input-address.md)
+
+- if the content is content-addressed, how it is content-addressed
+
+- if the content is content-addressed, [what is its content address](./content-address.md#fixed-content-addressing) (and thus what is its [store path])
+
+## Types of derivations
+
+The sections on each type of derivation output addressing ended up discussing other attributes of the derivation besides its outputs, such as purity, scheduling, determinism, etc.
+This is no coincidence; for the type of a derivation is in fact one-for-one with the type of its outputs:
+
+- A derivation that produces *xyz-addressed* outputs is an *xyz-addressing* derivation.
+
+The rules for this are fairly concise:
+
+- All the outputs must be of the same type / use the same addressing
+
+  - The derivation must have at least one output
+
+  - Additionally, if the outputs are fixed content-addressed, there must be exactly one output, whose specification is mapped from the name `out`.
+    (The name `out` is special, according to the rules described above.
+    Having only one output and calling its specification `out` means the single output is effectively anonymous; the store path just has the derivation name.)
+
+    (This is an arbitrary restriction that could be lifted.)
+
+- The output is either *fixed* or *floating*, indicating whether its store path is known prior to building it.
+
+  - With fixed content-addressing it is fixed.
+
+    > A *fixed content-addressing* derivation is also called a *fixed-output derivation*, since that is the only currently-implemented form of fixed-output addressing
+
+  - With floating content-addressing or input-addressing it is floating.
+
+    > Thus, historically with Nix, with no experimental features enabled, *all* outputs are fixed.
+
+- The derivation may be *pure* or *impure*, indicating what read access to the outside world the [builder](../index.md#builder) has.
+
+  - An input-addressing derivation *must* be pure.
+
+    > If it is impure, we would have a large problem, because an input-addressed derivation always produces outputs with the same paths.
+
+
+  - A content-addressing derivation may be pure or impure
+
+    - If it is impure, it may be fixed (typical), or it may be floating if the additional [`impure-derivations`][xp-feature-impure-derivations] experimental feature is enabled.
+
+    - If it is pure, it must be floating.
+
+    - Pure, fixed content-addressing derivations are not supported
+
+      > There is no use for this fourth combination.
+      > The sole purpose of an output's store path being fixed is to support the derivation being impure.
+
+[xp-feature-ca-derivations]: @docroot@/development/experimental-features.md#xp-feature-ca-derivations
+[xp-feature-git-hashing]: @docroot@/development/experimental-features.md#xp-feature-git-hashing
+[xp-feature-impure-derivations]: @docroot@/development/experimental-features.md#xp-feature-impure-derivations
diff --git a/doc/manual/source/store/derivation/outputs/input-address.md b/doc/manual/source/store/derivation/outputs/input-address.md
new file mode 100644
index 000000000..e2e15a801
--- /dev/null
+++ b/doc/manual/source/store/derivation/outputs/input-address.md
@@ -0,0 +1,31 @@
+# Input-addressing derivation outputs
+
+[input addressing]: #input-addressing
+
+"Input addressing" means to address the store object by the *way it was made* rather than *what it is*.
+That is to say, an input-addressed output's store path is a function not of the output itself, but of the derivation that produced it.
+Even if two store objects have the same contents, if they are produced in different ways, and one is input-addressed, then they will have different store paths, and thus are guaranteed to not be the same store object.
+ + + +[xp-feature-ca-derivations]: @docroot@/development/experimental-features.md#xp-feature-ca-derivations +[xp-feature-git-hashing]: @docroot@/development/experimental-features.md#xp-feature-git-hashing +[xp-feature-impure-derivations]: @docroot@/development/experimental-features.md#xp-feature-impure-derivations diff --git a/doc/manual/source/store/store-object/content-address.md b/doc/manual/source/store/store-object/content-address.md index 02dce2836..ff77dd4b6 100644 --- a/doc/manual/source/store/store-object/content-address.md +++ b/doc/manual/source/store/store-object/content-address.md @@ -24,13 +24,17 @@ For the full specification of the algorithms involved, see the [specification of ### File System Objects -With all currently supported store object content addressing methods, the file system object is always [content-addressed][fso-ca] first, and then that hash is incorporated into content address computation for the store object. +With all currently-supported store object content-addressing methods, the file system object is always [content-addressed][fso-ca] first, and then that hash is incorporated into content address computation for the store object. ### References +#### References to other store objects + With all currently supported store object content addressing methods, other objects are referred to by their regular (string-encoded-) [store paths][Store Path]. +#### Self-references + Self-references however cannot be referred to by their path, because we are in the midst of describing how to compute that path! > The alternative would require finding as hash function fixed point, i.e. the solution to an equation in the form @@ -40,7 +44,28 @@ Self-references however cannot be referred to by their path, because we are in t > which is computationally infeasible. > As far as we know, this is equivalent to finding a hash collision. -Instead we just have a "has self reference" boolean, which will end up affecting the digest. 
+Instead we have a "has self-reference" boolean, which ends up affecting the digest:
+In all currently-supported store object content-addressing methods, when hashing the file system object data, any occurrence of the store object's own store path in the digested data is replaced with a [sentinel value](https://en.wikipedia.org/wiki/Sentinel_value).
+The hashes of these modified input streams are used instead.
+
+When validating the content address of a store object after the fact, the above process works as written.
+However, when first creating the store object we don't know the store object's store path, as explained just above.
+We therefore, strictly speaking, do not know what value we will be replacing with the sentinel value in the inputs to hash functions.
+What instead happens is that the provisional store object --- the data from which we wish to create a store object --- is paired with a provisional "scratch" store path (that presumably was chosen when the data was created).
+That provisional store path is instead what is replaced with the sentinel value, rather than the final store path, which we do not yet know.
+
+> **Design note**
+>
+> It is an informal property of content-addressed store objects that the choice of provisional store path should not matter.
+> In other words, if a provisional store object is prepared in the same way except for the choice of provisional store path, the provisional data need not be identical.
+> But, after the sentinel value is substituted in place of each provisional store object's provisional store path, the final so-normalized data *should* be identical.
+>
+> If, conversely, the data after this normalization process is still different, we'll compute a different content-address.
+> The method of preparing the provisional self-referenced data has *failed* to be deterministic in the sense of not *leaking* the choice of provisional store path --- a choice which is supposed to be arbitrary --- into the final store object.
+>
+> This property is informal because at this stage, we have just described store objects, which have no formal notion of their origin.
+> Without such a formal notion, there is nothing to formally accuse of being insufficiently deterministic.
+> Where we cover [derivations](@docroot@/store/derivation/index.md), we will have a chance to make this a formal property, not of content-addressed store objects themselves, but of derivations that *produce* content-addressed store objects.
 
 ### Name and Store Directory
 
@@ -63,7 +88,7 @@
 References are not supported: store objects with flat hashing *and* references can not be created.
 
 This also uses the corresponding [Flat](../file-system-object/content-address.md#serial-flat) method of file system object content addressing.
 
-References to other store objects are supported, but self references are not.
+References to other store objects are supported, but self-references are not.
 
 This is the only store-object content-addressing method that is not named identically with a corresponding file system object method.
 It is somewhat obscure, mainly used for "drv files"
@@ -74,7 +99,7 @@
 Prefer another method if possible.
 
 This uses the corresponding [Nix Archive](../file-system-object/content-address.md#serial-nix-archive) method of file system object content addressing.
 
-References (to other store objects and self references alike) are supported so long as the hash algorithm is SHA-256, but not (neither kind) otherwise.
+References (to other store objects and self-references alike) are supported so long as the hash algorithm is SHA-256, but not (neither kind) otherwise.
### Git { #method-git } diff --git a/flake.nix b/flake.nix index 9cac46d09..a1a7b160c 100644 --- a/flake.nix +++ b/flake.nix @@ -34,9 +34,7 @@ officialRelease = true; - linux32BitSystems = [ - # "i686-linux" - ]; + linux32BitSystems = [ ]; linux64BitSystems = [ "x86_64-linux" "aarch64-linux" @@ -55,7 +53,6 @@ # Disabled because of https://github.com/NixOS/nixpkgs/issues/344423 # "x86_64-unknown-netbsd" "x86_64-unknown-freebsd" - #"x86_64-w64-mingw32" ]; stdenvs = [ @@ -82,14 +79,7 @@ forAllCrossSystems = lib.genAttrs crossSystems; - forAllStdenvs = - f: - lib.listToAttrs ( - map (stdenvName: { - name = "${stdenvName}Packages"; - value = f stdenvName; - }) stdenvs - ); + forAllStdenvs = lib.genAttrs stdenvs; # We don't apply flake-parts to the whole flake so that non-development attributes # load without fetching any development inputs. @@ -108,42 +98,38 @@ system: let make-pkgs = - crossSystem: stdenv: - import nixpkgs { - localSystem = { - inherit system; - }; - crossSystem = - if crossSystem == null then - null - else - { - config = crossSystem; - } - // lib.optionalAttrs (crossSystem == "x86_64-unknown-freebsd13") { - useLLVM = true; - }; - overlays = [ - (overlayFor (p: p.${stdenv})) - ]; - }; - stdenvs = forAllStdenvs (make-pkgs null); - native = stdenvs.stdenvPackages; + crossSystem: + forAllStdenvs ( + stdenv: + import nixpkgs { + localSystem = { + inherit system; + }; + crossSystem = + if crossSystem == null then + null + else + { + config = crossSystem; + } + // lib.optionalAttrs (crossSystem == "x86_64-unknown-freebsd13") { + useLLVM = true; + }; + overlays = [ + (overlayFor (pkgs: pkgs.${stdenv})) + ]; + } + ); in - { - inherit stdenvs native; - static = native.pkgsStatic; - llvm = native.pkgsLLVM; - cross = forAllCrossSystems (crossSystem: make-pkgs crossSystem "stdenv"); + rec { + nativeForStdenv = make-pkgs null; + crossForStdenv = forAllCrossSystems make-pkgs; + # Alias for convenience + native = nativeForStdenv.stdenv; + cross = forAllCrossSystems 
(crossSystem: crossForStdenv.${crossSystem}.stdenv); } ); - binaryTarball = - nix: pkgs: - pkgs.callPackage ./scripts/binary-tarball.nix { - inherit nix; - }; - overlayFor = getStdenv: final: prev: let @@ -213,7 +199,6 @@ hydraJobs = import ./packaging/hydra.nix { inherit inputs - binaryTarball forAllCrossSystems forAllSystems lib @@ -228,7 +213,6 @@ system: { installerScriptForGHA = self.hydraJobs.installerScriptForGHA.${system}; - #installTests = self.hydraJobs.installTests.${system}; nixpkgsLibTests = self.hydraJobs.tests.nixpkgsLibTests.${system}; rl-next = let @@ -283,7 +267,7 @@ # TODO: enable static builds for darwin, blocked on: # https://github.com/NixOS/nixpkgs/issues/320448 # TODO: disabled to speed up GHA CI. - #"static-" = nixpkgsFor.${system}.static; + #"static-" = nixpkgsFor.${system}.native.pkgsStatic; } ) ( @@ -401,8 +385,6 @@ { # These attributes go right into `packages.`. "${pkgName}" = nixpkgsFor.${system}.native.nixComponents.${pkgName}; - #"${pkgName}-static" = nixpkgsFor.${system}.static.nixComponents.${pkgName}; - #"${pkgName}-llvm" = nixpkgsFor.${system}.llvm.nixComponents.${pkgName}; } // lib.optionalAttrs supportsCross ( flatMapAttrs (lib.genAttrs crossSystems (_: { })) ( @@ -420,7 +402,7 @@ { # These attributes go right into `packages.`. 
"${pkgName}-${stdenvName}" = - nixpkgsFor.${system}.stdenvs."${stdenvName}Packages".nixComponents.${pkgName}; + nixpkgsFor.${system}.nativeForStdenv.${stdenvName}.nixComponents.${pkgName}; } ) ) @@ -455,40 +437,13 @@ forAllStdenvs ( stdenvName: makeShell { - pkgs = nixpkgsFor.${system}.stdenvs."${stdenvName}Packages"; + pkgs = nixpkgsFor.${system}.nativeForStdenv.${stdenvName}; } ) ) - /* - // lib.optionalAttrs (!nixpkgsFor.${system}.native.stdenv.isDarwin) ( - prefixAttrs "static" ( - forAllStdenvs ( - stdenvName: - makeShell { - pkgs = nixpkgsFor.${system}.stdenvs."${stdenvName}Packages".pkgsStatic; - } - ) - ) - // prefixAttrs "llvm" ( - forAllStdenvs ( - stdenvName: - makeShell { - pkgs = nixpkgsFor.${system}.stdenvs."${stdenvName}Packages".pkgsLLVM; - } - ) - ) - // prefixAttrs "cross" ( - forAllCrossSystems ( - crossSystem: - makeShell { - pkgs = nixpkgsFor.${system}.cross.${crossSystem}; - } - ) - ) - ) - */ // { - default = self.devShells.${system}.native-stdenvPackages; + native = self.devShells.${system}.native-stdenv; + default = self.devShells.${system}.native; } ); }; diff --git a/maintainers/data/release-credits-email-to-handle.json b/maintainers/data/release-credits-email-to-handle.json index 85f61f7e3..8f5031474 100644 --- a/maintainers/data/release-credits-email-to-handle.json +++ b/maintainers/data/release-credits-email-to-handle.json @@ -132,5 +132,18 @@ "140354451+myclevorname@users.noreply.github.com": "myclevorname", "bonniot@gmail.com": "dbdr", "jack@wilsdon.me": "jackwilsdon", - "143541718+WxNzEMof@users.noreply.github.com": "the-sun-will-rise-tomorrow" + "143541718+WxNzEMof@users.noreply.github.com": "the-sun-will-rise-tomorrow", + "fabianm88@gmail.com": "B4dM4n", + "silvan.mosberger@moduscreate.com": "infinisil", + "leandro.reina@ororatech.com": "kip93", + "else@someonex.net": "SomeoneSerge", + "aiden@aidenfoxivey.com": "aidenfoxivey", + "maxoscarhearnden@gmail.com": "MaxHearnden", + "silvanshade@users.noreply.github.com": "silvanshade", + 
"illia.bobyr@gmail.com": "ilya-bobyr", + "65963536+etherswangel@users.noreply.github.com": "stevalkr", + "thebenmachine+git@gmail.com": "bmillwood", + "leandro@kip93.net": "kip93", + "hello@briancamacho.me": "b-camacho", + "bcamacho@anduril.com": "bcamacho2" } \ No newline at end of file diff --git a/maintainers/data/release-credits-handle-to-name.json b/maintainers/data/release-credits-handle-to-name.json index c517933eb..7149149c0 100644 --- a/maintainers/data/release-credits-handle-to-name.json +++ b/maintainers/data/release-credits-handle-to-name.json @@ -118,5 +118,16 @@ "wh0": null, "mupdt": "Matej Urbas", "momeemt": "Mutsuha Asada", - "dwt": "\u202erekc\u00e4H nitraM\u202e" + "dwt": "\u202erekc\u00e4H nitraM\u202e", + "aidenfoxivey": "Aiden Fox Ivey", + "ilya-bobyr": "Illia Bobyr", + "B4dM4n": "Fabian M\u00f6ller", + "silvanshade": null, + "bcamacho2": null, + "bmillwood": "Ben Millwood", + "stevalkr": "Steve Walker", + "SomeoneSerge": "Someone", + "b-camacho": "Brian Camacho", + "MaxHearnden": null, + "kip93": "Leandro Emmanuel Reina Kiperman" } \ No newline at end of file diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 2f19072ee..4c75df246 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -37,6 +37,34 @@ fi ''}"; }; + nixfmt-rfc-style = { + enable = true; + excludes = [ + # Invalid + ''^tests/functional/lang/parse-.*\.nix$'' + + # Formatting-sensitive + ''^tests/functional/lang/eval-okay-curpos\.nix$'' + ''^tests/functional/lang/.*comment.*\.nix$'' + ''^tests/functional/lang/.*newline.*\.nix$'' + ''^tests/functional/lang/.*eol.*\.nix$'' + + # Syntax tests + ''^tests/functional/shell.shebang\.nix$'' + ''^tests/functional/lang/eval-okay-ind-string\.nix$'' + + # Not supported by nixfmt + ''^tests/functional/lang/eval-okay-deprecate-cursed-or\.nix$'' + ''^tests/functional/lang/eval-okay-attrs5\.nix$'' + + # More syntax tests + # These tests, or parts of them, should have been parse-* test cases. 
+ ''^tests/functional/lang/eval-fail-eol-2\.nix$'' + ''^tests/functional/lang/eval-fail-path-slash\.nix$'' + ''^tests/functional/lang/eval-fail-toJSON-non-utf-8\.nix$'' + ''^tests/functional/lang/eval-fail-set\.nix$'' + ]; + }; clang-format = { enable = true; # https://github.com/cachix/git-hooks.nix/pull/532 @@ -99,7 +127,6 @@ ''^src/libexpr/nixexpr\.cc$'' ''^src/libexpr/nixexpr\.hh$'' ''^src/libexpr/parser-state\.hh$'' - ''^src/libexpr/pos-table\.hh$'' ''^src/libexpr/primops\.cc$'' ''^src/libexpr/primops\.hh$'' ''^src/libexpr/primops/context\.cc$'' @@ -369,7 +396,6 @@ ''^src/libutil/types\.hh$'' ''^src/libutil/unix/file-descriptor\.cc$'' ''^src/libutil/unix/file-path\.cc$'' - ''^src/libutil/unix/monitor-fd\.hh$'' ''^src/libutil/unix/processes\.cc$'' ''^src/libutil/unix/signals-impl\.hh$'' ''^src/libutil/unix/signals\.cc$'' @@ -666,7 +692,6 @@ ''^src/libutil-tests/data/git/check-data\.sh$'' ]; }; - # TODO: nixfmt, https://github.com/NixOS/nixfmt/issues/153 }; }; }; diff --git a/maintainers/release-process.md b/maintainers/release-process.md index bf3c308cf..f2c61302b 100644 --- a/maintainers/release-process.md +++ b/maintainers/release-process.md @@ -144,12 +144,10 @@ release: Make a pull request and auto-merge it. -* Create a milestone for the next release, move all unresolved issues - from the previous milestone, and close the previous milestone. Set - the date for the next milestone 6 weeks from now. - * Create a backport label. +* Add the new backport label to `.mergify.yml`. + * Post an [announcement on Discourse](https://discourse.nixos.org/c/announcements/8), including the contents of `rl-$VERSION.md`. diff --git a/maintainers/upload-release.pl b/maintainers/upload-release.pl index 8a470c7cc..31a9c71d5 100755 --- a/maintainers/upload-release.pl +++ b/maintainers/upload-release.pl @@ -42,7 +42,7 @@ my $flakeUrl = $evalInfo->{flake}; my $flakeInfo = decode_json(`nix flake metadata --json "$flakeUrl"` or die) if $flakeUrl; my $nixRev = ($flakeInfo ? 
$flakeInfo->{revision} : $evalInfo->{jobsetevalinputs}->{nix}->{revision}) or die; -my $buildInfo = decode_json(fetch("$evalUrl/job/build.nix.x86_64-linux", 'application/json')); +my $buildInfo = decode_json(fetch("$evalUrl/job/build.nix-everything.x86_64-linux", 'application/json')); #print Dumper($buildInfo); my $releaseName = $buildInfo->{nixname}; @@ -91,7 +91,7 @@ sub getStorePath { sub copyManual { my $manual; eval { - $manual = getStorePath("build.nix.x86_64-linux", "doc"); + $manual = getStorePath("manual"); }; if ($@) { warn "$@"; @@ -240,12 +240,12 @@ if ($haveDocker) { # Upload nix-fallback-paths.nix. write_file("$tmpDir/fallback-paths.nix", "{\n" . - " x86_64-linux = \"" . getStorePath("build.nix.x86_64-linux") . "\";\n" . - " i686-linux = \"" . getStorePath("build.nix.i686-linux") . "\";\n" . - " aarch64-linux = \"" . getStorePath("build.nix.aarch64-linux") . "\";\n" . - " riscv64-linux = \"" . getStorePath("buildCross.nix.riscv64-unknown-linux-gnu.x86_64-linux") . "\";\n" . - " x86_64-darwin = \"" . getStorePath("build.nix.x86_64-darwin") . "\";\n" . - " aarch64-darwin = \"" . getStorePath("build.nix.aarch64-darwin") . "\";\n" . + " x86_64-linux = \"" . getStorePath("build.nix-everything.x86_64-linux") . "\";\n" . + " i686-linux = \"" . getStorePath("build.nix-everything.i686-linux") . "\";\n" . + " aarch64-linux = \"" . getStorePath("build.nix-everything.aarch64-linux") . "\";\n" . + " riscv64-linux = \"" . getStorePath("buildCross.nix-everything.riscv64-unknown-linux-gnu.x86_64-linux") . "\";\n" . + " x86_64-darwin = \"" . getStorePath("build.nix-everything.x86_64-darwin") . "\";\n" . + " aarch64-darwin = \"" . getStorePath("build.nix-everything.aarch64-darwin") . "\";\n" . "}\n"); # Upload release files to S3. 
diff --git a/nix-meson-build-support/big-objs/meson.build b/nix-meson-build-support/big-objs/meson.build new file mode 100644 index 000000000..7e422abd8 --- /dev/null +++ b/nix-meson-build-support/big-objs/meson.build @@ -0,0 +1,6 @@ +if host_machine.system() == 'windows' + # libexpr's primops creates a large object + # Without the following flag, we'll get errors when cross-compiling to mingw32: + # Fatal error: can't write 66 bytes to section .text of src/libexpr/libnixexpr.dll.p/primops.cc.obj: 'file too big' + add_project_arguments([ '-Wa,-mbig-obj' ], language: 'cpp') +endif diff --git a/scripts/binary-tarball.nix b/packaging/binary-tarball.nix similarity index 85% rename from scripts/binary-tarball.nix rename to packaging/binary-tarball.nix index 580e3859f..2050384b0 100644 --- a/scripts/binary-tarball.nix +++ b/packaging/binary-tarball.nix @@ -26,18 +26,18 @@ in runCommand "nix-binary-tarball-${version}" env '' cp ${installerClosureInfo}/registration $TMPDIR/reginfo - cp ${./create-darwin-volume.sh} $TMPDIR/create-darwin-volume.sh - substitute ${./install-nix-from-tarball.sh} $TMPDIR/install \ + cp ${../scripts/create-darwin-volume.sh} $TMPDIR/create-darwin-volume.sh + substitute ${../scripts/install-nix-from-tarball.sh} $TMPDIR/install \ --subst-var-by nix ${nix} \ --subst-var-by cacert ${cacert} - substitute ${./install-darwin-multi-user.sh} $TMPDIR/install-darwin-multi-user.sh \ + substitute ${../scripts/install-darwin-multi-user.sh} $TMPDIR/install-darwin-multi-user.sh \ --subst-var-by nix ${nix} \ --subst-var-by cacert ${cacert} - substitute ${./install-systemd-multi-user.sh} $TMPDIR/install-systemd-multi-user.sh \ + substitute ${../scripts/install-systemd-multi-user.sh} $TMPDIR/install-systemd-multi-user.sh \ --subst-var-by nix ${nix} \ --subst-var-by cacert ${cacert} - substitute ${./install-multi-user.sh} $TMPDIR/install-multi-user \ + substitute ${../scripts/install-multi-user.sh} $TMPDIR/install-multi-user \ --subst-var-by nix ${nix} \ 
--subst-var-by cacert ${cacert} diff --git a/packaging/dev-shell.nix b/packaging/dev-shell.nix index a5a2426a4..6f06bc2d8 100644 --- a/packaging/dev-shell.nix +++ b/packaging/dev-shell.nix @@ -1,4 +1,7 @@ -{ lib, devFlake }: +{ + lib, + devFlake, +}: { pkgs }: @@ -113,6 +116,7 @@ pkgs.nixComponents.nix-util.overrideAttrs ( pkgs.buildPackages.changelog-d modular.pre-commit.settings.package (pkgs.writeScriptBin "pre-commit-hooks-install" modular.pre-commit.settings.installationScript) + pkgs.buildPackages.nixfmt-rfc-style ] # TODO: Remove the darwin check once # https://github.com/NixOS/nixpkgs/pull/291814 is available diff --git a/packaging/everything.nix b/packaging/everything.nix index fa84f2c90..8dab8c67c 100644 --- a/packaging/everything.nix +++ b/packaging/everything.nix @@ -192,7 +192,7 @@ stdenv.mkDerivation (finalAttrs: { devPaths = lib.mapAttrsToList (_k: lib.getDev) finalAttrs.finalPackage.libs; in '' - mkdir -p $out $dev $doc $man + mkdir -p $out $dev # Merged outputs lndir ${nix-cli} $out @@ -201,8 +201,8 @@ stdenv.mkDerivation (finalAttrs: { done # Forwarded outputs - ln -s ${nix-manual} $doc - ln -s ${nix-manual.man} $man + ln -sT ${nix-manual} $doc + ln -sT ${nix-manual.man} $man ''; passthru = { diff --git a/packaging/hydra.nix b/packaging/hydra.nix index debd98cf2..243e61165 100644 --- a/packaging/hydra.nix +++ b/packaging/hydra.nix @@ -1,6 +1,5 @@ { inputs, - binaryTarball, forAllCrossSystems, forAllSystems, lib, @@ -14,7 +13,7 @@ let installScriptFor = tarballs: - nixpkgsFor.x86_64-linux.native.callPackage ../scripts/installer.nix { + nixpkgsFor.x86_64-linux.native.callPackage ./installer { inherit tarballs; }; @@ -65,65 +64,6 @@ in system: self.devShells.${system}.default.inputDerivation )) [ "i686-linux" ]; - /* - buildStatic = forAllPackages ( - pkgName: - lib.genAttrs linux64BitSystems (system: nixpkgsFor.${system}.static.nixComponents.${pkgName}) - ); - - buildCross = forAllPackages ( - pkgName: - # Hack to avoid non-evaling package - ( - if 
pkgName == "nix-functional-tests" then - lib.flip builtins.removeAttrs [ "x86_64-w64-mingw32" ] - else - lib.id - ) - ( - forAllCrossSystems ( - crossSystem: - lib.genAttrs [ "x86_64-linux" ] ( - system: nixpkgsFor.${system}.cross.${crossSystem}.nixComponents.${pkgName} - ) - ) - ) - ); - - buildNoGc = - let - components = forAllSystems ( - system: - nixpkgsFor.${system}.native.nixComponents.overrideScope ( - self: super: { - nix-expr = super.nix-expr.override { enableGC = false; }; - } - ) - ); - in - forAllPackages (pkgName: forAllSystems (system: components.${system}.${pkgName})); - - buildNoTests = forAllSystems (system: nixpkgsFor.${system}.native.nixComponents.nix-cli); - - # Toggles some settings for better coverage. Windows needs these - # library combinations, and Debian build Nix with GNU readline too. - buildReadlineNoMarkdown = - let - components = forAllSystems ( - system: - nixpkgsFor.${system}.native.nixComponents.overrideScope ( - self: super: { - nix-cmd = super.nix-cmd.override { - enableMarkdown = false; - readlineFlavor = "readline"; - }; - } - ) - ); - in - forAllPackages (pkgName: forAllSystems (system: components.${system}.${pkgName})); - */ - # Perl bindings for various platforms. perlBindings = forAllSystems (system: nixpkgsFor.${system}.native.nixComponents.nix-perl-bindings); @@ -131,40 +71,12 @@ in # with the closure of 'nix' package, and the second half of # the installation script. binaryTarball = forAllSystems ( - system: binaryTarball nixpkgsFor.${system}.native.nix nixpkgsFor.${system}.native + system: nixpkgsFor.${system}.native.callPackage ./binary-tarball.nix { } ); - /* - binaryTarballCross = lib.genAttrs [ "x86_64-linux" ] ( - system: - forAllCrossSystems ( - crossSystem: - binaryTarball nixpkgsFor.${system}.cross.${crossSystem}.nix - nixpkgsFor.${system}.cross.${crossSystem} - ) - ); - - # The first half of the installation script. This is uploaded - # to https://nixos.org/nix/install. 
It downloads the binary - # tarball for the user's system and calls the second half of the - # installation script. - installerScript = installScriptFor [ - # Native - self.hydraJobs.binaryTarball."x86_64-linux" - self.hydraJobs.binaryTarball."i686-linux" - self.hydraJobs.binaryTarball."aarch64-linux" - self.hydraJobs.binaryTarball."x86_64-darwin" - self.hydraJobs.binaryTarball."aarch64-darwin" - # Cross - self.hydraJobs.binaryTarballCross."x86_64-linux"."armv6l-unknown-linux-gnueabihf" - self.hydraJobs.binaryTarballCross."x86_64-linux"."armv7l-unknown-linux-gnueabihf" - self.hydraJobs.binaryTarballCross."x86_64-linux"."riscv64-unknown-linux-gnu" - ]; - */ - installerScriptForGHA = forAllSystems ( system: - nixpkgsFor.${system}.native.callPackage ../scripts/installer.nix { + nixpkgsFor.${system}.native.callPackage ./installer { tarballs = [ self.hydraJobs.binaryTarball.${system} ]; } ); @@ -190,12 +102,8 @@ in # System tests. tests = import ../tests/nixos { - inherit - lib - nixpkgs - nixpkgsFor - self - ; + inherit lib nixpkgs nixpkgsFor; + inherit (self.inputs) nixpkgs-23-11; } // { @@ -230,27 +138,4 @@ in pkgs = nixpkgsFor.x86_64-linux.native; nixpkgs = nixpkgs-regression; }; - - /* - installTests = forAllSystems ( - system: - let - pkgs = nixpkgsFor.${system}.native; - in - pkgs.runCommand "install-tests" { - againstSelf = testNixVersions pkgs pkgs.nix; - againstCurrentLatest = - # FIXME: temporarily disable this on macOS because of #3605. 
- if system == "x86_64-linux" then testNixVersions pkgs pkgs.nixVersions.latest else null; - # Disabled because the latest stable version doesn't handle - # `NIX_DAEMON_SOCKET_PATH` which is required for the tests to work - # againstLatestStable = testNixVersions pkgs pkgs.nixStable; - } "touch $out" - ); - - installerTests = import ../tests/installer { - binaryTarballs = self.hydraJobs.binaryTarball; - inherit nixpkgsFor; - }; - */ } diff --git a/scripts/installer.nix b/packaging/installer/default.nix similarity index 100% rename from scripts/installer.nix rename to packaging/installer/default.nix diff --git a/scripts/install.in b/packaging/installer/install.in similarity index 100% rename from scripts/install.in rename to packaging/installer/install.in diff --git a/scripts/nix-profile-daemon.fish.in b/scripts/nix-profile-daemon.fish.in index 346dce5dd..2ecab0077 100644 --- a/scripts/nix-profile-daemon.fish.in +++ b/scripts/nix-profile-daemon.fish.in @@ -1,3 +1,13 @@ +# Only execute this file once per shell. +if test -z "$HOME" || \ + test -n "$__ETC_PROFILE_NIX_SOURCED" + exit +end + +set --global __ETC_PROFILE_NIX_SOURCED 1 + +# Local helpers + function add_path --argument-names new_path if type -q fish_add_path # fish 3.2.0 or newer @@ -10,48 +20,51 @@ function add_path --argument-names new_path end end -# Only execute this file once per shell. -if test -n "$__ETC_PROFILE_NIX_SOURCED" - exit -end +# Main configuration -set __ETC_PROFILE_NIX_SOURCED 1 +# Set up the per-user profile. +set --local NIX_LINK $HOME/.nix-profile + +# Set up environment. 
+# This part should be kept in sync with nixpkgs:nixos/modules/programs/environment.nix set --export NIX_PROFILES "@localstatedir@/nix/profiles/default $HOME/.nix-profile" # Populate bash completions, .desktop files, etc if test -z "$XDG_DATA_DIRS" # According to XDG spec the default is /usr/local/share:/usr/share, don't set something that prevents that default - set --export XDG_DATA_DIRS "/usr/local/share:/usr/share:/nix/var/nix/profiles/default/share" + set --export XDG_DATA_DIRS "/usr/local/share:/usr/share:$NIX_LINK/share:/nix/var/nix/profiles/default/share" else - set --export XDG_DATA_DIRS "$XDG_DATA_DIRS:/nix/var/nix/profiles/default/share" + set --export XDG_DATA_DIRS "$XDG_DATA_DIRS:$NIX_LINK/share:/nix/var/nix/profiles/default/share" end # Set $NIX_SSL_CERT_FILE so that Nixpkgs applications like curl work. if test -n "$NIX_SSL_CERT_FILE" - : # Allow users to override the NIX_SSL_CERT_FILE + : # Allow users to override the NIX_SSL_CERT_FILE else if test -e /etc/ssl/certs/ca-certificates.crt # NixOS, Ubuntu, Debian, Gentoo, Arch - set --export NIX_SSL_CERT_FILE /etc/ssl/certs/ca-certificates.crt + set --export NIX_SSL_CERT_FILE /etc/ssl/certs/ca-certificates.crt else if test -e /etc/ssl/ca-bundle.pem # openSUSE Tumbleweed - set --export NIX_SSL_CERT_FILE /etc/ssl/ca-bundle.pem + set --export NIX_SSL_CERT_FILE /etc/ssl/ca-bundle.pem else if test -e /etc/ssl/certs/ca-bundle.crt # Old NixOS - set --export NIX_SSL_CERT_FILE /etc/ssl/certs/ca-bundle.crt + set --export NIX_SSL_CERT_FILE /etc/ssl/certs/ca-bundle.crt else if test -e /etc/pki/tls/certs/ca-bundle.crt # Fedora, CentOS - set --export NIX_SSL_CERT_FILE /etc/pki/tls/certs/ca-bundle.crt + set --export NIX_SSL_CERT_FILE /etc/pki/tls/certs/ca-bundle.crt else if test -e "$NIX_LINK/etc/ssl/certs/ca-bundle.crt" # fall back to cacert in Nix profile - set --export NIX_SSL_CERT_FILE "$NIX_LINK/etc/ssl/certs/ca-bundle.crt" + set --export NIX_SSL_CERT_FILE "$NIX_LINK/etc/ssl/certs/ca-bundle.crt" else if test -e 
"$NIX_LINK/etc/ca-bundle.crt" # old cacert in Nix profile - set --export NIX_SSL_CERT_FILE "$NIX_LINK/etc/ca-bundle.crt" + set --export NIX_SSL_CERT_FILE "$NIX_LINK/etc/ca-bundle.crt" else - # Fall back to what is in the nix profiles, favouring whatever is defined last. - for i in (string split ' ' $NIX_PROFILES) - if test -e "$i/etc/ssl/certs/ca-bundle.crt" - set --export NIX_SSL_CERT_FILE "$i/etc/ssl/certs/ca-bundle.crt" + # Fall back to what is in the nix profiles, favouring whatever is defined last. + for i in (string split ' ' $NIX_PROFILES) + if test -e "$i/etc/ssl/certs/ca-bundle.crt" + set --export NIX_SSL_CERT_FILE "$i/etc/ssl/certs/ca-bundle.crt" + end end - end end add_path "@localstatedir@/nix/profiles/default/bin" -add_path "$HOME/.nix-profile/bin" +add_path "$NIX_LINK/bin" + +# Cleanup functions -e add_path diff --git a/scripts/nix-profile.fish.in b/scripts/nix-profile.fish.in index 3a8c234ad..05d9a187d 100644 --- a/scripts/nix-profile.fish.in +++ b/scripts/nix-profile.fish.in @@ -1,3 +1,13 @@ +# Only execute this file once per shell. +if test -z "$HOME" || test -z "$USER" || \ + test -n "$__ETC_PROFILE_NIX_SOURCED" + exit +end + +set --global __ETC_PROFILE_NIX_SOURCED 1 + +# Local helpers + function add_path --argument-names new_path if type -q fish_add_path # fish 3.2.0 or newer @@ -10,50 +20,50 @@ function add_path --argument-names new_path end end -if test -n "$HOME" && test -n "$USER" +# Main configuration - # Set up the per-user profile. +# Set up the per-user profile. - set NIX_LINK $HOME/.nix-profile +set --local NIX_LINK $HOME/.nix-profile - # Set up environment. - # This part should be kept in sync with nixpkgs:nixos/modules/programs/environment.nix - set --export NIX_PROFILES "@localstatedir@/nix/profiles/default $HOME/.nix-profile" +# Set up environment. 
+# This part should be kept in sync with nixpkgs:nixos/modules/programs/environment.nix +set --export NIX_PROFILES "@localstatedir@/nix/profiles/default $HOME/.nix-profile" - # Populate bash completions, .desktop files, etc - if test -z "$XDG_DATA_DIRS" - # According to XDG spec the default is /usr/local/share:/usr/share, don't set something that prevents that default - set --export XDG_DATA_DIRS "/usr/local/share:/usr/share:$NIX_LINK/share:/nix/var/nix/profiles/default/share" - else - set --export XDG_DATA_DIRS "$XDG_DATA_DIRS:$NIX_LINK/share:/nix/var/nix/profiles/default/share" - end - - # Set $NIX_SSL_CERT_FILE so that Nixpkgs applications like curl work. - if test -n "$NIX_SSL_CERT_FILE" - : # Allow users to override the NIX_SSL_CERT_FILE - else if test -e /etc/ssl/certs/ca-certificates.crt # NixOS, Ubuntu, Debian, Gentoo, Arch - set --export NIX_SSL_CERT_FILE /etc/ssl/certs/ca-certificates.crt - else if test -e /etc/ssl/ca-bundle.pem # openSUSE Tumbleweed - set --export NIX_SSL_CERT_FILE /etc/ssl/ca-bundle.pem - else if test -e /etc/ssl/certs/ca-bundle.crt # Old NixOS - set --export NIX_SSL_CERT_FILE /etc/ssl/certs/ca-bundle.crt - else if test -e /etc/pki/tls/certs/ca-bundle.crt # Fedora, CentOS - set --export NIX_SSL_CERT_FILE /etc/pki/tls/certs/ca-bundle.crt - else if test -e "$NIX_LINK/etc/ssl/certs/ca-bundle.crt" # fall back to cacert in Nix profile - set --export NIX_SSL_CERT_FILE "$NIX_LINK/etc/ssl/certs/ca-bundle.crt" - else if test -e "$NIX_LINK/etc/ca-bundle.crt" # old cacert in Nix profile - set --export NIX_SSL_CERT_FILE "$NIX_LINK/etc/ca-bundle.crt" - end - - # Only use MANPATH if it is already set. In general `man` will just simply - # pick up `.nix-profile/share/man` because is it close to `.nix-profile/bin` - # which is in the $PATH. For more info, run `manpath -d`. 
- if set --query MANPATH - set --export --prepend --path MANPATH "$NIX_LINK/share/man" - end - - add_path "$NIX_LINK/bin" - set --erase NIX_LINK +# Populate bash completions, .desktop files, etc +if test -z "$XDG_DATA_DIRS" + # According to XDG spec the default is /usr/local/share:/usr/share, don't set something that prevents that default + set --export XDG_DATA_DIRS "/usr/local/share:/usr/share:$NIX_LINK/share:/nix/var/nix/profiles/default/share" +else + set --export XDG_DATA_DIRS "$XDG_DATA_DIRS:$NIX_LINK/share:/nix/var/nix/profiles/default/share" end +# Set $NIX_SSL_CERT_FILE so that Nixpkgs applications like curl work. +if test -n "$NIX_SSL_CERT_FILE" + : # Allow users to override the NIX_SSL_CERT_FILE +else if test -e /etc/ssl/certs/ca-certificates.crt # NixOS, Ubuntu, Debian, Gentoo, Arch + set --export NIX_SSL_CERT_FILE /etc/ssl/certs/ca-certificates.crt +else if test -e /etc/ssl/ca-bundle.pem # openSUSE Tumbleweed + set --export NIX_SSL_CERT_FILE /etc/ssl/ca-bundle.pem +else if test -e /etc/ssl/certs/ca-bundle.crt # Old NixOS + set --export NIX_SSL_CERT_FILE /etc/ssl/certs/ca-bundle.crt +else if test -e /etc/pki/tls/certs/ca-bundle.crt # Fedora, CentOS + set --export NIX_SSL_CERT_FILE /etc/pki/tls/certs/ca-bundle.crt +else if test -e "$NIX_LINK/etc/ssl/certs/ca-bundle.crt" # fall back to cacert in Nix profile + set --export NIX_SSL_CERT_FILE "$NIX_LINK/etc/ssl/certs/ca-bundle.crt" +else if test -e "$NIX_LINK/etc/ca-bundle.crt" # old cacert in Nix profile + set --export NIX_SSL_CERT_FILE "$NIX_LINK/etc/ca-bundle.crt" +end + +# Only use MANPATH if it is already set. In general `man` will just simply +# pick up `.nix-profile/share/man` because is it close to `.nix-profile/bin` +# which is in the $PATH. For more info, run `manpath -d`. 
+if set --query MANPATH + set --export --prepend --path MANPATH "$NIX_LINK/share/man" +end + +add_path "$NIX_LINK/bin" + +# Cleanup + functions -e add_path diff --git a/src/build-remote/build-remote.cc b/src/build-remote/build-remote.cc index 82ad7d862..88b704288 100644 --- a/src/build-remote/build-remote.cc +++ b/src/build-remote/build-remote.cc @@ -51,7 +51,7 @@ static bool allSupportedLocally(Store & store, const std::set& requ static int main_build_remote(int argc, char * * argv) { { - logger = makeJSONLogger(*logger); + logger = makeJSONLogger(getStandardError()); /* Ensure we don't get any SSH passphrase or host key popups. */ unsetenv("DISPLAY"); diff --git a/src/libcmd/command.hh b/src/libcmd/command.hh index 23529848f..9570ce3e7 100644 --- a/src/libcmd/command.hh +++ b/src/libcmd/command.hh @@ -347,7 +347,7 @@ struct MixEnvironment : virtual Args void setEnviron(); }; -void completeFlakeInputPath( +void completeFlakeInputAttrPath( AddCompletions & completions, ref evalState, const std::vector & flakeRefs, diff --git a/src/libcmd/common-eval-args.cc b/src/libcmd/common-eval-args.cc index 36af9120e..da460c49b 100644 --- a/src/libcmd/common-eval-args.cc +++ b/src/libcmd/common-eval-args.cc @@ -33,8 +33,10 @@ EvalSettings evalSettings { // FIXME `parseFlakeRef` should take a `std::string_view`. 
auto flakeRef = parseFlakeRef(fetchSettings, std::string { rest }, {}, true, false); debug("fetching flake search path element '%s''", rest); - auto storePath = flakeRef.resolve(state.store).fetchTree(state.store).first; - return state.rootPath(state.store->toRealPath(storePath)); + auto [accessor, lockedRef] = flakeRef.resolve(state.store).lazyFetch(state.store); + auto storePath = nix::fetchToStore(*state.store, SourcePath(accessor), FetchMode::Copy, lockedRef.input.getName()); + state.allowPath(storePath); + return state.storePath(storePath); }, }, }, @@ -176,13 +178,15 @@ SourcePath lookupFileArg(EvalState & state, std::string_view s, const Path * bas state.fetchSettings, EvalSettings::resolvePseudoUrl(s)); auto storePath = fetchToStore(*state.store, SourcePath(accessor), FetchMode::Copy); - return state.rootPath(CanonPath(state.store->toRealPath(storePath))); + return state.storePath(storePath); } else if (hasPrefix(s, "flake:")) { auto flakeRef = parseFlakeRef(fetchSettings, std::string(s.substr(6)), {}, true, false); - auto storePath = flakeRef.resolve(state.store).fetchTree(state.store).first; - return state.rootPath(CanonPath(state.store->toRealPath(storePath))); + auto [accessor, lockedRef] = flakeRef.resolve(state.store).lazyFetch(state.store); + auto storePath = nix::fetchToStore(*state.store, SourcePath(accessor), FetchMode::Copy, lockedRef.input.getName()); + state.allowPath(storePath); + return state.storePath(storePath); } else if (s.size() > 2 && s.at(0) == '<' && s.at(s.size() - 1) == '>') { diff --git a/src/libcmd/installables.cc b/src/libcmd/installables.cc index 523727cd4..e8ecd4432 100644 --- a/src/libcmd/installables.cc +++ b/src/libcmd/installables.cc @@ -33,7 +33,7 @@ namespace nix { namespace fs { using namespace std::filesystem; } -void completeFlakeInputPath( +void completeFlakeInputAttrPath( AddCompletions & completions, ref evalState, const std::vector & flakeRefs, @@ -117,10 +117,10 @@ MixFlakeOptions::MixFlakeOptions() .labels = 
{"input-path"}, .handler = {[&](std::string s) { warn("'--update-input' is a deprecated alias for 'flake update' and will be removed in a future version."); - lockFlags.inputUpdates.insert(flake::parseInputPath(s)); + lockFlags.inputUpdates.insert(flake::parseInputAttrPath(s)); }}, .completer = {[&](AddCompletions & completions, size_t, std::string_view prefix) { - completeFlakeInputPath(completions, getEvalState(), getFlakeRefsForCompletion(), prefix); + completeFlakeInputAttrPath(completions, getEvalState(), getFlakeRefsForCompletion(), prefix); }} }); @@ -129,15 +129,15 @@ MixFlakeOptions::MixFlakeOptions() .description = "Override a specific flake input (e.g. `dwarffs/nixpkgs`). This implies `--no-write-lock-file`.", .category = category, .labels = {"input-path", "flake-url"}, - .handler = {[&](std::string inputPath, std::string flakeRef) { + .handler = {[&](std::string inputAttrPath, std::string flakeRef) { lockFlags.writeLockFile = false; lockFlags.inputOverrides.insert_or_assign( - flake::parseInputPath(inputPath), + flake::parseInputAttrPath(inputAttrPath), parseFlakeRef(fetchSettings, flakeRef, absPath(getCommandBaseDir()), true)); }}, .completer = {[&](AddCompletions & completions, size_t n, std::string_view prefix) { if (n == 0) { - completeFlakeInputPath(completions, getEvalState(), getFlakeRefsForCompletion(), prefix); + completeFlakeInputAttrPath(completions, getEvalState(), getFlakeRefsForCompletion(), prefix); } else if (n == 1) { completeFlakeRef(completions, getEvalState()->store, prefix); } diff --git a/src/libcmd/misc-store-flags.cc b/src/libcmd/misc-store-flags.cc index 06552c032..4e29e8981 100644 --- a/src/libcmd/misc-store-flags.cc +++ b/src/libcmd/misc-store-flags.cc @@ -50,7 +50,7 @@ Args::Flag hashAlgo(std::string && longName, HashAlgorithm * ha) { return Args::Flag { .longName = std::move(longName), - .description = "Hash algorithm (`md5`, `sha1`, `sha256`, or `sha512`).", + .description = "Hash algorithm (`blake3`, `md5`, `sha1`, 
`sha256`, or `sha512`).", .labels = {"hash-algo"}, .handler = {[ha](std::string s) { *ha = parseHashAlgo(s); @@ -63,7 +63,7 @@ Args::Flag hashAlgoOpt(std::string && longName, std::optional * o { return Args::Flag { .longName = std::move(longName), - .description = "Hash algorithm (`md5`, `sha1`, `sha256`, or `sha512`). Can be omitted for SRI hashes.", + .description = "Hash algorithm (`blake3`, `md5`, `sha1`, `sha256`, or `sha512`). Can be omitted for SRI hashes.", .labels = {"hash-algo"}, .handler = {[oha](std::string s) { *oha = std::optional{parseHashAlgo(s)}; @@ -120,7 +120,7 @@ Args::Flag contentAddressMethod(ContentAddressMethod * method) - [`text`](@docroot@/store/store-object/content-address.md#method-text): Like `flat`, but used for - [derivations](@docroot@/glossary.md#store-derivation) serialized in store object and + [derivations](@docroot@/glossary.md#gloss-store-derivation) serialized in store object and [`builtins.toFile`](@docroot@/language/builtins.html#builtins-toFile). For advanced use-cases only; for regular usage prefer `nar` and `flat`. diff --git a/src/libcmd/repl.cc b/src/libcmd/repl.cc index f292f06bb..281e1f6f0 100644 --- a/src/libcmd/repl.cc +++ b/src/libcmd/repl.cc @@ -101,6 +101,8 @@ struct NixRepl Value & v, unsigned int maxDepth = std::numeric_limits::max()) { + // Hide the progress bar during printing because it might interfere + auto suspension = logger->suspend(); ::nix::printValue(*state, str, v, PrintOptions { .ansiColors = true, .force = true, @@ -126,7 +128,7 @@ NixRepl::NixRepl(const LookupPath & lookupPath, nix::ref store, refstaticBaseEnv.get())) + , staticEnv(new StaticEnv(nullptr, state->staticBaseEnv)) , runNixPtr{runNix} , interacter(make_unique(getDataDir() + "/repl-history")) { @@ -138,16 +140,13 @@ static std::ostream & showDebugTrace(std::ostream & out, const PosTable & positi out << ANSI_RED "error: " << ANSI_NORMAL; out << dt.hint.str() << "\n"; - // prefer direct pos, but if noPos then try the expr. 
- auto pos = dt.pos - ? dt.pos - : positions[dt.expr.getPos() ? dt.expr.getPos() : noPos]; + auto pos = dt.getPos(positions); if (pos) { - out << *pos; - if (auto loc = pos->getCodeLines()) { + out << pos; + if (auto loc = pos.getCodeLines()) { out << "\n"; - printCodeLines(out, "", *pos, *loc); + printCodeLines(out, "", pos, *loc); out << "\n"; } } @@ -177,18 +176,20 @@ ReplExitStatus NixRepl::mainLoop() while (true) { // Hide the progress bar while waiting for user input, so that it won't interfere. - logger->pause(); - // When continuing input from previous lines, don't print a prompt, just align to the same - // number of chars as the prompt. - if (!interacter->getLine(input, input.empty() ? ReplPromptType::ReplPrompt : ReplPromptType::ContinuationPrompt)) { - // Ctrl-D should exit the debugger. - state->debugStop = false; - logger->cout(""); - // TODO: Should Ctrl-D exit just the current debugger session or - // the entire program? - return ReplExitStatus::QuitAll; + { + auto suspension = logger->suspend(); + // When continuing input from previous lines, don't print a prompt, just align to the same + // number of chars as the prompt. + if (!interacter->getLine(input, input.empty() ? ReplPromptType::ReplPrompt : ReplPromptType::ContinuationPrompt)) { + // Ctrl-D should exit the debugger. + state->debugStop = false; + logger->cout(""); + // TODO: Should Ctrl-D exit just the current debugger session or + // the entire program? 
+ return ReplExitStatus::QuitAll; + } + // `suspension` resumes the logger } - logger->resume(); try { switch (processLine(input)) { case ProcessLineResult::Quit: @@ -583,6 +584,7 @@ ProcessLineResult NixRepl::processLine(std::string line) else if (command == ":p" || command == ":print") { Value v; evalString(arg, v); + auto suspension = logger->suspend(); if (v.type() == nString) { std::cout << v.string_view(); } else { @@ -691,6 +693,7 @@ ProcessLineResult NixRepl::processLine(std::string line) } else { Value v; evalString(line, v); + auto suspension = logger->suspend(); printValue(std::cout, v, 1); std::cout << std::endl; } diff --git a/src/libexpr-tests/error_traces.cc b/src/libexpr-tests/error_traces.cc index 2aa13cf62..53013a34a 100644 --- a/src/libexpr-tests/error_traces.cc +++ b/src/libexpr-tests/error_traces.cc @@ -1152,7 +1152,7 @@ namespace nix { ASSERT_TRACE1("hashString \"foo\" \"content\"", UsageError, - HintFmt("unknown hash algorithm '%s', expect 'md5', 'sha1', 'sha256', or 'sha512'", "foo")); + HintFmt("unknown hash algorithm '%s', expect 'blake3', 'md5', 'sha1', 'sha256', or 'sha512'", "foo")); ASSERT_TRACE2("hashString \"sha256\" {}", TypeError, diff --git a/src/libexpr-tests/nix_api_expr.cc b/src/libexpr-tests/nix_api_expr.cc index 5ed78d2fc..633224ae6 100644 --- a/src/libexpr-tests/nix_api_expr.cc +++ b/src/libexpr-tests/nix_api_expr.cc @@ -172,7 +172,7 @@ TEST_F(nix_api_expr_test, nix_expr_realise_context_bad_build) TEST_F(nix_api_expr_test, nix_expr_realise_context) { - // TODO (ca-derivations): add a content-addressed derivation output, which produces a placeholder + // TODO (ca-derivations): add a content-addressing derivation output, which produces a placeholder auto expr = R"( '' a derivation output: ${ diff --git a/src/libexpr-tests/primops.cc b/src/libexpr-tests/primops.cc index 5b5898237..2bf726477 100644 --- a/src/libexpr-tests/primops.cc +++ b/src/libexpr-tests/primops.cc @@ -28,20 +28,15 @@ namespace nix { }; class CaptureLogging { 
- Logger * oldLogger; - std::unique_ptr tempLogger; + std::unique_ptr oldLogger; public: - CaptureLogging() : tempLogger(std::make_unique()) { - oldLogger = logger; - logger = tempLogger.get(); + CaptureLogging() { + oldLogger = std::move(logger); + logger = std::make_unique(); } ~CaptureLogging() { - logger = oldLogger; - } - - std::string get() const { - return tempLogger->get(); + logger = std::move(oldLogger); } }; @@ -113,7 +108,7 @@ namespace nix { CaptureLogging l; auto v = eval("builtins.trace \"test string 123\" 123"); ASSERT_THAT(v, IsIntEq(123)); - auto text = l.get(); + auto text = (dynamic_cast(logger.get()))->get(); ASSERT_NE(text.find("test string 123"), std::string::npos); } diff --git a/src/libexpr/call-flake.nix b/src/libexpr/call-flake.nix index 9b38644bb..1e9e21048 100644 --- a/src/libexpr/call-flake.nix +++ b/src/libexpr/call-flake.nix @@ -23,7 +23,7 @@ let resolveInput = inputSpec: if builtins.isList inputSpec then getInputByPath lockFile.root inputSpec else inputSpec; - # Follow an input path (e.g. ["dwarffs" "nixpkgs"]) from the + # Follow an input attrpath (e.g. ["dwarffs" "nixpkgs"]) from the # root node, returning the final node. getInputByPath = nodeName: path: diff --git a/src/libexpr/eval-error.cc b/src/libexpr/eval-error.cc index cdb0b4772..b9742d3ea 100644 --- a/src/libexpr/eval-error.cc +++ b/src/libexpr/eval-error.cc @@ -45,7 +45,7 @@ EvalErrorBuilder & EvalErrorBuilder::withFrame(const Env & env, const Expr // TODO: check compatibility with nested debugger calls. // TODO: What side-effects?? 
error.state.debugTraces.push_front(DebugTrace{ - .pos = error.state.positions[expr.getPos()], + .pos = expr.getPos(), .expr = expr, .env = env, .hint = HintFmt("Fake frame for debugging purposes"), diff --git a/src/libexpr/eval-inline.hh b/src/libexpr/eval-inline.hh index 631c0f396..5d1a0c4d6 100644 --- a/src/libexpr/eval-inline.hh +++ b/src/libexpr/eval-inline.hh @@ -146,7 +146,7 @@ inline void EvalState::forceList(Value & v, const PosIdx pos, std::string_view e [[gnu::always_inline]] inline CallDepth EvalState::addCallDepth(const PosIdx pos) { if (callDepth > settings.maxCallDepth) - error("stack overflow; max-call-depth exceeded").atPos(pos).debugThrow(); + error("stack overflow; max-call-depth exceeded").atPos(pos).debugThrow(); return CallDepth(callDepth); }; diff --git a/src/libexpr/eval-settings.hh b/src/libexpr/eval-settings.hh index 4740c2983..3f8383dd6 100644 --- a/src/libexpr/eval-settings.hh +++ b/src/libexpr/eval-settings.hh @@ -2,7 +2,6 @@ ///@file #include "config.hh" -#include "ref.hh" #include "source-path.hh" namespace nix { diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc index 8aef85dc5..4e15175ac 100644 --- a/src/libexpr/eval.cc +++ b/src/libexpr/eval.cc @@ -246,15 +246,42 @@ EvalState::EvalState( , repair(NoRepair) , emptyBindings(0) , rootFS( - settings.restrictEval || settings.pureEval - ? ref(AllowListSourceAccessor::create(getFSSourceAccessor(), {}, - [&settings](const CanonPath & path) -> RestrictedPathError { - auto modeInformation = settings.pureEval - ? "in pure evaluation mode (use '--impure' to override)" - : "in restricted mode"; - throw RestrictedPathError("access to absolute path '%1%' is forbidden %2%", path, modeInformation); - })) - : getFSSourceAccessor()) + ({ + /* In pure eval mode, we provide a filesystem that only + contains the Nix store. 
+ + If we have a chroot store and pure eval is not enabled, + use a union accessor to make the chroot store available + at its logical location while still having the + underlying directory available. This is necessary for + instance if we're evaluating a file from the physical + /nix/store while using a chroot store. */ + auto accessor = getFSSourceAccessor(); + + auto realStoreDir = dirOf(store->toRealPath(StorePath::dummy)); + if (settings.pureEval || store->storeDir != realStoreDir) { + auto storeFS = makeMountedSourceAccessor( + { + {CanonPath::root, makeEmptySourceAccessor()}, + {CanonPath(store->storeDir), makeFSSourceAccessor(realStoreDir)} + }); + accessor = settings.pureEval + ? storeFS + : makeUnionSourceAccessor({accessor, storeFS}); + } + + /* Apply access control if needed. */ + if (settings.restrictEval || settings.pureEval) + accessor = AllowListSourceAccessor::create(accessor, {}, + [&settings](const CanonPath & path) -> RestrictedPathError { + auto modeInformation = settings.pureEval + ? "in pure evaluation mode (use '--impure' to override)" + : "in restricted mode"; + throw RestrictedPathError("access to absolute path '%1%' is forbidden %2%", path, modeInformation); + }); + + accessor; + })) , corepkgsFS(make_ref()) , internalFS(make_ref()) , derivationInternal{corepkgsFS->addFile( @@ -344,7 +371,7 @@ void EvalState::allowPath(const Path & path) void EvalState::allowPath(const StorePath & storePath) { if (auto rootFS2 = rootFS.dynamic_pointer_cast()) - rootFS2->allowPrefix(CanonPath(store->toRealPath(storePath))); + rootFS2->allowPrefix(CanonPath(store->printStorePath(storePath))); } void EvalState::allowClosure(const StorePath & storePath) @@ -422,16 +449,6 @@ void EvalState::checkURI(const std::string & uri) } -Path EvalState::toRealPath(const Path & path, const NixStringContext & context) -{ - // FIXME: check whether 'path' is in 'context'. - return - !context.empty() && store->isInStore(path) - ? 
store->toRealPath(path) - : path; -} - - Value * EvalState::addConstant(const std::string & name, Value & v, Constant info) { Value * v2 = allocValue(); @@ -754,18 +771,26 @@ void EvalState::runDebugRepl(const Error * error, const Env & env, const Expr & if (!debugRepl || inDebugger) return; - auto dts = - error && expr.getPos() - ? std::make_unique( - *this, - DebugTrace { - .pos = error->info().pos ? error->info().pos : positions[expr.getPos()], + auto dts = [&]() -> std::unique_ptr { + if (error && expr.getPos()) { + auto trace = DebugTrace{ + .pos = [&]() -> std::variant { + if (error->info().pos) { + if (auto * pos = error->info().pos.get()) + return *pos; + return noPos; + } + return expr.getPos(); + }(), .expr = expr, .env = env, .hint = error->info().msg, - .isError = true - }) - : nullptr; + .isError = true}; + + return std::make_unique(*this, std::move(trace)); + } + return nullptr; + }(); if (error) { @@ -810,7 +835,7 @@ static std::unique_ptr makeDebugTraceStacker( EvalState & state, Expr & expr, Env & env, - std::shared_ptr && pos, + std::variant pos, const Args & ... formatArgs) { return std::make_unique(state, @@ -1087,7 +1112,7 @@ void EvalState::evalFile(const SourcePath & path, Value & v, bool mustBeTrivial) *this, *e, this->baseEnv, - e->getPos() ? std::make_shared(positions[e->getPos()]) : nullptr, + e->getPos(), "while evaluating the file '%1%':", resolvedPath.to_string()) : nullptr; @@ -1313,9 +1338,7 @@ void ExprLet::eval(EvalState & state, Env & env, Value & v) state, *this, env2, - getPos() - ? 
std::make_shared(state.positions[getPos()]) - : nullptr, + getPos(), "while evaluating a '%1%' expression", "let" ) @@ -1384,7 +1407,7 @@ void ExprSelect::eval(EvalState & state, Env & env, Value & v) state, *this, env, - state.positions[getPos()], + getPos(), "while evaluating the attribute '%1%'", showAttrPath(state, env, attrPath)) : nullptr; @@ -1585,7 +1608,7 @@ void EvalState::callFunction(Value & fun, std::span args, Value & vRes, try { auto dts = debugRepl ? makeDebugTraceStacker( - *this, *lambda.body, env2, positions[lambda.pos], + *this, *lambda.body, env2, lambda.pos, "while calling %s", lambda.name ? concatStrings("'", symbols[lambda.name], "'") @@ -1720,9 +1743,7 @@ void ExprCall::eval(EvalState & state, Env & env, Value & v) state, *this, env, - getPos() - ? std::make_shared(state.positions[getPos()]) - : nullptr, + getPos(), "while calling a function" ) : nullptr; @@ -2051,7 +2072,7 @@ void ExprConcatStrings::eval(EvalState & state, Env & env, Value & v) else if (firstType == nPath) { if (!context.empty()) state.error("a string that refers to a store path cannot be appended to a path").atPos(pos).withFrame(env, *this).debugThrow(); - v.mkPath(state.rootPath(CanonPath(canonPath(str())))); + v.mkPath(state.rootPath(CanonPath(str()))); } else v.mkStringMove(c_str(), context); } @@ -2106,7 +2127,7 @@ void EvalState::forceValueDeep(Value & v) try { // If the value is a thunk, we're evaling. Otherwise no trace necessary. auto dts = debugRepl && i.value->isThunk() - ? makeDebugTraceStacker(*this, *i.value->payload.thunk.expr, *i.value->payload.thunk.env, positions[i.pos], + ? 
makeDebugTraceStacker(*this, *i.value->payload.thunk.expr, *i.value->payload.thunk.env, i.pos, "while evaluating the attribute '%1%'", symbols[i.name]) : nullptr; @@ -2432,7 +2453,7 @@ SourcePath EvalState::coerceToPath(const PosIdx pos, Value & v, NixStringContext auto path = coerceToString(pos, v, context, errorCtx, false, false, true).toOwned(); if (path == "" || path[0] != '/') error("string '%1%' doesn't represent an absolute path", path).withTrace(pos, errorCtx).debugThrow(); - return rootPath(CanonPath(path)); + return rootPath(path); } @@ -3086,7 +3107,7 @@ std::optional EvalState::resolveLookupPathPath(const LookupPath::Pat fetchSettings, EvalSettings::resolvePseudoUrl(value)); auto storePath = fetchToStore(*store, SourcePath(accessor), FetchMode::Copy); - return finish(rootPath(store->toRealPath(storePath))); + return finish(this->storePath(storePath)); } catch (Error & e) { logWarning({ .msg = HintFmt("Nix search path entry '%1%' cannot be downloaded, ignoring", value) diff --git a/src/libexpr/eval.hh b/src/libexpr/eval.hh index 767578343..eb6f667a2 100644 --- a/src/libexpr/eval.hh +++ b/src/libexpr/eval.hh @@ -171,11 +171,28 @@ struct RegexCache; std::shared_ptr makeRegexCache(); struct DebugTrace { - std::shared_ptr pos; + /* WARNING: Converting PosIdx -> Pos should be done with extra care. This is + due to the fact that operator[] of PosTable is incredibly expensive. */ + std::variant pos; const Expr & expr; const Env & env; HintFmt hint; bool isError; + + Pos getPos(const PosTable & table) const + { + return std::visit( + overloaded{ + [&](PosIdx idx) { + // Prefer direct pos, but if noPos then try the expr. + if (!idx) + idx = expr.getPos(); + return table[idx]; + }, + [&](Pos pos) { return pos; }, + }, + pos); + } }; class EvalState : public std::enable_shared_from_this @@ -389,6 +406,15 @@ public: */ SourcePath rootPath(PathView path); + /** + * Return a `SourcePath` that refers to `path` in the store. 
+ * + * For now, this has to also be within the root filesystem for + * backwards compat, but for Windows and maybe also pure eval, we'll + * probably want to do something different. + */ + SourcePath storePath(const StorePath & path); + /** * Allow access to a path. */ @@ -412,17 +438,6 @@ public: void checkURI(const std::string & uri); - /** - * When using a diverted store and 'path' is in the Nix store, map - * 'path' to the diverted location (e.g. /nix/store/foo is mapped - * to /home/alice/my-nix/nix/store/foo). However, this is only - * done if the context is not empty, since otherwise we're - * probably trying to read from the actual /nix/store. This is - * intended to distinguish between import-from-derivation and - * sources stored in the actual /nix/store. - */ - Path toRealPath(const Path & path, const NixStringContext & context); - /** * Parse a Nix expression from the specified file. */ diff --git a/src/libexpr/meson.build b/src/libexpr/meson.build index b33aebc86..dffcc1742 100644 --- a/src/libexpr/meson.build +++ b/src/libexpr/meson.build @@ -24,6 +24,7 @@ deps_public_maybe_subproject = [ dependency('nix-fetchers'), ] subdir('nix-meson-build-support/subprojects') +subdir('nix-meson-build-support/big-objs') boost = dependency( 'boost', @@ -171,8 +172,6 @@ headers = [config_h] + files( # internal: 'lexer-helpers.hh', 'nixexpr.hh', 'parser-state.hh', - 'pos-idx.hh', - 'pos-table.hh', 'primops.hh', 'print-ambiguous.hh', 'print-options.hh', diff --git a/src/libexpr/nixexpr.cc b/src/libexpr/nixexpr.cc index 063ff0753..f17226728 100644 --- a/src/libexpr/nixexpr.cc +++ b/src/libexpr/nixexpr.cc @@ -310,7 +310,7 @@ void ExprVar::bindVars(EvalState & es, const std::shared_ptr & const StaticEnv * curEnv; Level level; int withLevel = -1; - for (curEnv = env.get(), level = 0; curEnv; curEnv = curEnv->up, level++) { + for (curEnv = env.get(), level = 0; curEnv; curEnv = curEnv->up.get(), level++) { if (curEnv->isWith) { if (withLevel == -1) withLevel = level; } 
else { @@ -331,7 +331,7 @@ void ExprVar::bindVars(EvalState & es, const std::shared_ptr & "undefined variable '%1%'", es.symbols[name] ).atPos(pos).debugThrow(); - for (auto * e = env.get(); e && !fromWith; e = e->up) + for (auto * e = env.get(); e && !fromWith; e = e->up.get()) fromWith = e->isWith; this->level = withLevel; } @@ -379,7 +379,7 @@ std::shared_ptr ExprAttrs::bindInheritSources( // and displacement, and nothing else is allowed to access it. ideally we'd // not even *have* an expr that grabs anything from this env since it's fully // invisible, but the evaluator does not allow for this yet. - auto inner = std::make_shared(nullptr, env.get(), 0); + auto inner = std::make_shared(nullptr, env, 0); for (auto from : *inheritFromExprs) from->bindVars(es, env); @@ -393,7 +393,7 @@ void ExprAttrs::bindVars(EvalState & es, const std::shared_ptr if (recursive) { auto newEnv = [&] () -> std::shared_ptr { - auto newEnv = std::make_shared(nullptr, env.get(), attrs.size()); + auto newEnv = std::make_shared(nullptr, env, attrs.size()); Displacement displ = 0; for (auto & i : attrs) @@ -440,7 +440,7 @@ void ExprLambda::bindVars(EvalState & es, const std::shared_ptr es.exprEnvs.insert(std::make_pair(this, env)); auto newEnv = std::make_shared( - nullptr, env.get(), + nullptr, env, (hasFormals() ? formals->formals.size() : 0) + (!arg ? 
0 : 1)); @@ -474,7 +474,7 @@ void ExprCall::bindVars(EvalState & es, const std::shared_ptr & void ExprLet::bindVars(EvalState & es, const std::shared_ptr & env) { auto newEnv = [&] () -> std::shared_ptr { - auto newEnv = std::make_shared(nullptr, env.get(), attrs->attrs.size()); + auto newEnv = std::make_shared(nullptr, env, attrs->attrs.size()); Displacement displ = 0; for (auto & i : attrs->attrs) @@ -500,7 +500,7 @@ void ExprWith::bindVars(EvalState & es, const std::shared_ptr & es.exprEnvs.insert(std::make_pair(this, env)); parentWith = nullptr; - for (auto * e = env.get(); e && !parentWith; e = e->up) + for (auto * e = env.get(); e && !parentWith; e = e->up.get()) parentWith = e->isWith; /* Does this `with' have an enclosing `with'? If so, record its @@ -509,14 +509,14 @@ void ExprWith::bindVars(EvalState & es, const std::shared_ptr & const StaticEnv * curEnv; Level level; prevWith = 0; - for (curEnv = env.get(), level = 1; curEnv; curEnv = curEnv->up, level++) + for (curEnv = env.get(), level = 1; curEnv; curEnv = curEnv->up.get(), level++) if (curEnv->isWith) { prevWith = level; break; } attrs->bindVars(es, env); - auto newEnv = std::make_shared(this, env.get()); + auto newEnv = std::make_shared(this, env); body->bindVars(es, newEnv); } @@ -601,41 +601,6 @@ void ExprLambda::setDocComment(DocComment docComment) { } }; - - -/* Position table. 
*/ - -Pos PosTable::operator[](PosIdx p) const -{ - auto origin = resolve(p); - if (!origin) - return {}; - - const auto offset = origin->offsetOf(p); - - Pos result{0, 0, origin->origin}; - auto lines = this->lines.lock(); - auto linesForInput = (*lines)[origin->offset]; - - if (linesForInput.empty()) { - auto source = result.getSource().value_or(""); - const char * begin = source.data(); - for (Pos::LinesIterator it(source), end; it != end; it++) - linesForInput.push_back(it->data() - begin); - if (linesForInput.empty()) - linesForInput.push_back(0); - } - // as above: the first line starts at byte 0 and is always present - auto lineStartOffset = std::prev( - std::upper_bound(linesForInput.begin(), linesForInput.end(), offset)); - - result.line = 1 + (lineStartOffset - linesForInput.begin()); - result.column = 1 + (offset - *lineStartOffset); - return result; -} - - - /* Symbol table. */ size_t SymbolTable::totalSize() const diff --git a/src/libexpr/nixexpr.hh b/src/libexpr/nixexpr.hh index a7ad580d2..88ebc80f8 100644 --- a/src/libexpr/nixexpr.hh +++ b/src/libexpr/nixexpr.hh @@ -480,13 +480,16 @@ extern ExprBlackHole eBlackHole; struct StaticEnv { ExprWith * isWith; - const StaticEnv * up; + std::shared_ptr up; // Note: these must be in sorted order. 
typedef std::vector> Vars; Vars vars; - StaticEnv(ExprWith * isWith, const StaticEnv * up, size_t expectedSize = 0) : isWith(isWith), up(up) { + StaticEnv(ExprWith * isWith, std::shared_ptr up, size_t expectedSize = 0) + : isWith(isWith) + , up(std::move(up)) + { vars.reserve(expectedSize); }; diff --git a/src/libexpr/parser.y b/src/libexpr/parser.y index 944c7b1af..bde721401 100644 --- a/src/libexpr/parser.y +++ b/src/libexpr/parser.y @@ -359,11 +359,18 @@ string_parts_interpolated path_start : PATH { - Path path(absPath(std::string_view{$1.p, $1.l}, state->basePath.path.abs())); + std::string_view literal({$1.p, $1.l}); + Path path(absPath(literal, state->basePath.path.abs())); /* add back in the trailing '/' to the first segment */ - if ($1.p[$1.l-1] == '/' && $1.l > 1) - path += "/"; - $$ = new ExprPath(ref(state->rootFS), std::move(path)); + if (literal.size() > 1 && literal.back() == '/') + path += '/'; + $$ = + /* Absolute paths are always interpreted relative to the + root filesystem accessor, rather than the accessor of the + current Nix expression. */ + literal.front() == '/' + ? 
new ExprPath(state->rootFS, std::move(path)) + : new ExprPath(state->basePath.accessor, std::move(path)); } | HPATH { if (state->settings.pureEval) { diff --git a/src/libexpr/paths.cc b/src/libexpr/paths.cc index 50d0d9895..3d602ae2d 100644 --- a/src/libexpr/paths.cc +++ b/src/libexpr/paths.cc @@ -1,3 +1,4 @@ +#include "store-api.hh" #include "eval.hh" namespace nix { @@ -12,4 +13,9 @@ SourcePath EvalState::rootPath(PathView path) return {rootFS, CanonPath(absPath(path))}; } +SourcePath EvalState::storePath(const StorePath & path) +{ + return {rootFS, CanonPath{store->printStorePath(path)}}; +} + } diff --git a/src/libexpr/primops.cc b/src/libexpr/primops.cc index e6f6f1dda..54682ea31 100644 --- a/src/libexpr/primops.cc +++ b/src/libexpr/primops.cc @@ -145,8 +145,7 @@ static SourcePath realisePath(EvalState & state, const PosIdx pos, Value & v, st try { if (!context.empty() && path.accessor == state.rootFS) { auto rewrites = state.realiseContext(context); - auto realPath = state.toRealPath(rewriteStrings(path.path.abs(), rewrites), context); - path = {path.accessor, CanonPath(realPath)}; + path = {path.accessor, CanonPath(rewriteStrings(path.path.abs(), rewrites))}; } return resolveSymlinks ? path.resolveSymlinks(*resolveSymlinks) : path; } catch (Error & e) { @@ -239,7 +238,7 @@ static void scopedImport(EvalState & state, const PosIdx pos, SourcePath & path, Env * env = &state.allocEnv(vScope->attrs()->size()); env->up = &state.baseEnv; - auto staticEnv = std::make_shared(nullptr, state.staticBaseEnv.get(), vScope->attrs()->size()); + auto staticEnv = std::make_shared(nullptr, state.staticBaseEnv, vScope->attrs()->size()); unsigned int displ = 0; for (auto & attr : *vScope->attrs()) { @@ -1595,9 +1594,13 @@ static RegisterPrimOp primop_placeholder({ .name = "placeholder", .args = {"output"}, .doc = R"( - Return a placeholder string for the specified *output* that will be - substituted by the corresponding output path at build time. 
Typical - outputs would be `"out"`, `"bin"` or `"dev"`. + Return at + [output placeholder string](@docroot@/store/derivation/index.md#output-placeholder) + for the specified *output* that will be substituted by the corresponding + [output path](@docroot@/glossary.md#gloss-output-path) + at build time. + + Typical outputs would be `"out"`, `"bin"` or `"dev"`. )", .fun = prim_placeholder, }); @@ -2135,12 +2138,15 @@ static RegisterPrimOp primop_outputOf({ .name = "__outputOf", .args = {"derivation-reference", "output-name"}, .doc = R"( - Return the output path of a derivation, literally or using a placeholder if needed. + Return the output path of a derivation, literally or using an + [input placeholder string](@docroot@/store/derivation/index.md#input-placeholder) + if needed. If the derivation has a statically-known output path (i.e. the derivation output is input-addressed, or fixed content-addresed), the output path will just be returned. - But if the derivation is content-addressed or if the derivation is itself not-statically produced (i.e. is the output of another derivation), a placeholder will be returned instead. + But if the derivation is content-addressed or if the derivation is itself not-statically produced (i.e. is the output of another derivation), an input placeholder will be returned instead. - *`derivation reference`* must be a string that may contain a regular store path to a derivation, or may be a placeholder reference. If the derivation is produced by a derivation, you must explicitly select `drv.outPath`. + *`derivation reference`* must be a string that may contain a regular store path to a derivation, or may be an input placeholder reference. + If the derivation is produced by a derivation, you must explicitly select `drv.outPath`. This primop can be chained arbitrarily deeply. For instance, @@ -2150,9 +2156,9 @@ static RegisterPrimOp primop_outputOf({ "out" ``` - will return a placeholder for the output of the output of `myDrv`. 
+ will return a input placeholder for the output of the output of `myDrv`. - This primop corresponds to the `^` sigil for derivable paths, e.g. as part of installable syntax on the command line. + This primop corresponds to the `^` sigil for [deriving paths](@docroot@/glossary.md#gloss-deriving-paths), e.g. as part of installable syntax on the command line. )", .fun = prim_outputOf, .experimentalFeature = Xp::DynamicDerivations, @@ -2472,21 +2478,11 @@ static void addPath( const NixStringContext & context) { try { - StorePathSet refs; - if (path.accessor == state.rootFS && state.store->isInStore(path.path.abs())) { // FIXME: handle CA derivation outputs (where path needs to // be rewritten to the actual output). auto rewrites = state.realiseContext(context); - path = {state.rootFS, CanonPath(state.toRealPath(rewriteStrings(path.path.abs(), rewrites), context))}; - - try { - auto [storePath, subPath] = state.store->toStorePath(path.path.abs()); - // FIXME: we should scanForReferences on the path before adding it - refs = state.store->queryPathInfo(storePath)->references; - path = {state.rootFS, CanonPath(state.store->toRealPath(storePath) + subPath)}; - } catch (Error &) { // FIXME: should be InvalidPathError - } + path = {path.accessor, CanonPath(rewriteStrings(path.path.abs(), rewrites))}; } std::unique_ptr filter; diff --git a/src/libexpr/primops/fetchTree.cc b/src/libexpr/primops/fetchTree.cc index 32bb5449f..0c82c82bf 100644 --- a/src/libexpr/primops/fetchTree.cc +++ b/src/libexpr/primops/fetchTree.cc @@ -90,24 +90,26 @@ static void fetchTree( fetchers::Input input { state.fetchSettings }; NixStringContext context; std::optional type; + auto fetcher = params.isFetchGit ? 
"fetchGit" : "fetchTree"; if (params.isFetchGit) type = "git"; state.forceValue(*args[0], pos); if (args[0]->type() == nAttrs) { - state.forceAttrs(*args[0], pos, "while evaluating the argument passed to builtins.fetchTree"); + state.forceAttrs(*args[0], pos, fmt("while evaluating the argument passed to '%s'", fetcher)); fetchers::Attrs attrs; if (auto aType = args[0]->attrs()->get(state.sType)) { if (type) state.error( - "unexpected attribute 'type'" + "unexpected argument 'type'" ).atPos(pos).debugThrow(); - type = state.forceStringNoCtx(*aType->value, aType->pos, "while evaluating the `type` attribute passed to builtins.fetchTree"); + type = state.forceStringNoCtx(*aType->value, aType->pos, + fmt("while evaluating the `type` argument passed to '%s'", fetcher)); } else if (!type) state.error( - "attribute 'type' is missing in call to 'fetchTree'" + "argument 'type' is missing in call to '%s'", fetcher ).atPos(pos).debugThrow(); attrs.emplace("type", type.value()); @@ -127,9 +129,8 @@ static void fetchTree( else if (attr.value->type() == nInt) { auto intValue = attr.value->integer().value; - if (intValue < 0) { - state.error("negative value given for fetchTree attr %1%: %2%", state.symbols[attr.name], intValue).atPos(pos).debugThrow(); - } + if (intValue < 0) + state.error("negative value given for '%s' argument '%s': %d", fetcher, state.symbols[attr.name], intValue).atPos(pos).debugThrow(); attrs.emplace(state.symbols[attr.name], uint64_t(intValue)); } else if (state.symbols[attr.name] == "publicKeys") { @@ -137,8 +138,8 @@ static void fetchTree( attrs.emplace(state.symbols[attr.name], printValueAsJSON(state, true, *attr.value, pos, context).dump()); } else - state.error("fetchTree argument '%s' is %s while a string, Boolean or integer is expected", - state.symbols[attr.name], showType(*attr.value)).debugThrow(); + state.error("argument '%s' to '%s' is %s while a string, Boolean or integer is expected", + state.symbols[attr.name], fetcher, 
showType(*attr.value)).debugThrow(); } if (params.isFetchGit && !attrs.contains("exportIgnore") && (!attrs.contains("submodules") || !*fetchers::maybeGetBoolAttr(attrs, "submodules"))) { @@ -153,14 +154,14 @@ static void fetchTree( if (!params.allowNameArgument) if (auto nameIter = attrs.find("name"); nameIter != attrs.end()) state.error( - "attribute 'name' isn’t supported in call to 'fetchTree'" + "argument 'name' isn’t supported in call to '%s'", fetcher ).atPos(pos).debugThrow(); input = fetchers::Input::fromAttrs(state.fetchSettings, std::move(attrs)); } else { auto url = state.coerceToString(pos, *args[0], context, - "while evaluating the first argument passed to the fetcher", - false, false).toOwned(); + fmt("while evaluating the first argument passed to '%s'", fetcher), + false, false).toOwned(); if (params.isFetchGit) { fetchers::Attrs attrs; @@ -178,15 +179,16 @@ static void fetchTree( if (!state.settings.pureEval && !input.isDirect()) input = lookupInRegistries(state.store, input).first; - if (state.settings.pureEval && !input.isConsideredLocked(state.fetchSettings)) { - auto fetcher = "fetchTree"; - if (params.isFetchGit) - fetcher = "fetchGit"; - - state.error( - "in pure evaluation mode, '%s' will not fetch unlocked input '%s'", - fetcher, input.to_string() - ).atPos(pos).debugThrow(); + if (state.settings.pureEval && !input.isLocked()) { + if (input.getNarHash()) + warn( + "Input '%s' is unlocked (e.g. lacks a Git revision) but does have a NAR hash. " + "This is deprecated since such inputs are verifiable but may not be reproducible.", + input.to_string()); + else + state.error( + "in pure evaluation mode, '%s' will not fetch unlocked input '%s'", + fetcher, input.to_string()).atPos(pos).debugThrow(); } state.checkURI(input.toURLString()); @@ -361,6 +363,12 @@ static RegisterPrimOp primop_fetchTree({ Default: `false` + - `lfs` (Bool, optional) + + Fetch any [Git LFS](https://git-lfs.com/) files. 
+ + Default: `false` + - `allRefs` (Bool, optional) By default, this has no effect. This becomes relevant only once `shallow` cloning is disabled. @@ -683,6 +691,13 @@ static RegisterPrimOp primop_fetchGit({ Make a shallow clone when fetching the Git tree. When this is enabled, the options `ref` and `allRefs` have no effect anymore. + + - `lfs` (default: `false`) + + A boolean that when `true` specifies that [Git LFS] files should be fetched. + + [Git LFS]: https://git-lfs.com/ + - `allRefs` Whether to fetch all references (eg. branches and tags) of the repository. diff --git a/src/libfetchers-tests/access-tokens.cc b/src/libfetchers-tests/access-tokens.cc new file mode 100644 index 000000000..a869b088b --- /dev/null +++ b/src/libfetchers-tests/access-tokens.cc @@ -0,0 +1,97 @@ +#include +#include "fetchers.hh" +#include "fetch-settings.hh" +#include "json-utils.hh" +#include +#include "tests/characterization.hh" + +namespace nix::fetchers { + +using nlohmann::json; + +class AccessKeysTest : public ::testing::Test +{ +protected: + +public: + void SetUp() override {} + void TearDown() override {} +}; + +TEST_F(AccessKeysTest, singleOrgGitHub) +{ + fetchers::Settings fetchSettings = fetchers::Settings{}; + fetchSettings.accessTokens.get().insert({"github.com/a", "token"}); + auto i = Input::fromURL(fetchSettings, "github:a/b"); + + auto token = i.scheme->getAccessToken(fetchSettings, "github.com", "github.com/a/b"); + ASSERT_EQ(token, "token"); +} + +TEST_F(AccessKeysTest, nonMatches) +{ + fetchers::Settings fetchSettings = fetchers::Settings{}; + fetchSettings.accessTokens.get().insert({"github.com", "token"}); + auto i = Input::fromURL(fetchSettings, "gitlab:github.com/evil"); + + auto token = i.scheme->getAccessToken(fetchSettings, "gitlab.com", "gitlab.com/github.com/evil"); + ASSERT_EQ(token, std::nullopt); +} + +TEST_F(AccessKeysTest, noPartialMatches) +{ + fetchers::Settings fetchSettings = fetchers::Settings{}; + 
fetchSettings.accessTokens.get().insert({"github.com/partial", "token"}); + auto i = Input::fromURL(fetchSettings, "github:partial-match/repo"); + + auto token = i.scheme->getAccessToken(fetchSettings, "github.com", "github.com/partial-match"); + ASSERT_EQ(token, std::nullopt); +} + +TEST_F(AccessKeysTest, repoGitHub) +{ + fetchers::Settings fetchSettings = fetchers::Settings{}; + fetchSettings.accessTokens.get().insert({"github.com", "token"}); + fetchSettings.accessTokens.get().insert({"github.com/a/b", "another_token"}); + fetchSettings.accessTokens.get().insert({"github.com/a/c", "yet_another_token"}); + auto i = Input::fromURL(fetchSettings, "github:a/a"); + + auto token = i.scheme->getAccessToken(fetchSettings, "github.com", "github.com/a/a"); + ASSERT_EQ(token, "token"); + + token = i.scheme->getAccessToken(fetchSettings, "github.com", "github.com/a/b"); + ASSERT_EQ(token, "another_token"); + + token = i.scheme->getAccessToken(fetchSettings, "github.com", "github.com/a/c"); + ASSERT_EQ(token, "yet_another_token"); +} + +TEST_F(AccessKeysTest, multipleGitLab) +{ + fetchers::Settings fetchSettings = fetchers::Settings{}; + fetchSettings.accessTokens.get().insert({"gitlab.com", "token"}); + fetchSettings.accessTokens.get().insert({"gitlab.com/a/b", "another_token"}); + auto i = Input::fromURL(fetchSettings, "gitlab:a/b"); + + auto token = i.scheme->getAccessToken(fetchSettings, "gitlab.com", "gitlab.com/a/b"); + ASSERT_EQ(token, "another_token"); + + token = i.scheme->getAccessToken(fetchSettings, "gitlab.com", "gitlab.com/a/c"); + ASSERT_EQ(token, "token"); +} + +TEST_F(AccessKeysTest, multipleSourceHut) +{ + fetchers::Settings fetchSettings = fetchers::Settings{}; + fetchSettings.accessTokens.get().insert({"git.sr.ht", "token"}); + fetchSettings.accessTokens.get().insert({"git.sr.ht/~a/b", "another_token"}); + auto i = Input::fromURL(fetchSettings, "sourcehut:a/b"); + + auto token = i.scheme->getAccessToken(fetchSettings, "git.sr.ht", "git.sr.ht/~a/b"); + 
ASSERT_EQ(token, "another_token"); + + token = i.scheme->getAccessToken(fetchSettings, "git.sr.ht", "git.sr.ht/~a/c"); + ASSERT_EQ(token, "token"); +} + +} diff --git a/src/libfetchers-tests/git-utils.cc b/src/libfetchers-tests/git-utils.cc index 0bf3076dc..ee6ef1734 100644 --- a/src/libfetchers-tests/git-utils.cc +++ b/src/libfetchers-tests/git-utils.cc @@ -7,13 +7,18 @@ #include #include "fs-sink.hh" #include "serialise.hh" +#include "git-lfs-fetch.hh" namespace nix { +namespace fs { +using namespace std::filesystem; +} + class GitUtilsTest : public ::testing::Test { // We use a single repository for all tests. - Path tmpDir; + fs::path tmpDir; std::unique_ptr delTmpDir; public: @@ -25,7 +30,7 @@ public: // Create the repo with libgit2 git_libgit2_init(); git_repository * repo = nullptr; - auto r = git_repository_init(&repo, tmpDir.c_str(), 0); + auto r = git_repository_init(&repo, tmpDir.string().c_str(), 0); ASSERT_EQ(r, 0); git_repository_free(repo); } @@ -41,6 +46,11 @@ public: { return GitRepo::openRepo(tmpDir, true, false); } + + std::string getRepoName() const + { + return tmpDir.filename().string(); + } }; void writeString(CreateRegularFileSink & fileSink, std::string contents, bool executable) @@ -78,7 +88,7 @@ TEST_F(GitUtilsTest, sink_basic) // sink->createHardlink("foo-1.1/links/foo-2", CanonPath("foo-1.1/hello")); auto result = repo->dereferenceSingletonDirectory(sink->flush()); - auto accessor = repo->getAccessor(result, false); + auto accessor = repo->getAccessor(result, false, getRepoName()); auto entries = accessor->readDirectory(CanonPath::root); ASSERT_EQ(entries.size(), 5); ASSERT_EQ(accessor->readFile(CanonPath("hello")), "hello world"); diff --git a/src/libfetchers-tests/meson.build b/src/libfetchers-tests/meson.build index 739435501..b60ff5675 100644 --- a/src/libfetchers-tests/meson.build +++ b/src/libfetchers-tests/meson.build @@ -31,6 +31,9 @@ deps_private += rapidcheck gtest = dependency('gtest', main : true) deps_private += gtest 
+libgit2 = dependency('libgit2') +deps_private += libgit2 + add_project_arguments( # TODO(Qyriad): Yes this is how the autoconf+Make system did it. # It would be nice for our headers to be idempotent instead. @@ -43,6 +46,8 @@ add_project_arguments( subdir('nix-meson-build-support/common') sources = files( + 'access-tokens.cc', + 'git-utils.cc', 'public-key.cc', ) diff --git a/src/libfetchers-tests/package.nix b/src/libfetchers-tests/package.nix index 1e379fc5a..6e3581183 100644 --- a/src/libfetchers-tests/package.nix +++ b/src/libfetchers-tests/package.nix @@ -7,6 +7,7 @@ nix-fetchers, nix-store-test-support, + libgit2, rapidcheck, gtest, runCommand, @@ -42,6 +43,7 @@ mkMesonExecutable (finalAttrs: { nix-store-test-support rapidcheck gtest + libgit2 ]; mesonFlags = [ diff --git a/src/libfetchers/fetch-settings.hh b/src/libfetchers/fetch-settings.hh index 37fae0235..971ec4719 100644 --- a/src/libfetchers/fetch-settings.hh +++ b/src/libfetchers/fetch-settings.hh @@ -23,9 +23,11 @@ struct Settings : public Config Access tokens are specified as a string made up of space-separated `host=token` values. The specific token used is selected by matching the `host` portion against the - "host" specification of the input. The actual use of the - `token` value is determined by the type of resource being - accessed: + "host" specification of the input. The `host` portion may + contain a path element which will match against the prefix + URL for the input. (eg: `github.com/org=token`). 
The actual use + of the `token` value is determined by the type of resource + being accessed: * Github: the token value is the OAUTH-TOKEN string obtained as the Personal Access Token from the Github server (see diff --git a/src/libfetchers/fetchers.cc b/src/libfetchers/fetchers.cc index 9459db087..abf021554 100644 --- a/src/libfetchers/fetchers.cc +++ b/src/libfetchers/fetchers.cc @@ -155,12 +155,6 @@ bool Input::isLocked() const return scheme && scheme->isLocked(*this); } -bool Input::isConsideredLocked( - const Settings & settings) const -{ - return isLocked() || (settings.allowDirtyLocks && getNarHash()); -} - bool Input::isFinal() const { return maybeGetBoolAttr(attrs, "__final").value_or(false); @@ -192,6 +186,7 @@ bool Input::contains(const Input & other) const return false; } +// FIXME: remove std::pair Input::fetchToStore(ref store) const { if (!scheme) @@ -206,10 +201,6 @@ std::pair Input::fetchToStore(ref store) const auto narHash = store->queryPathInfo(storePath)->narHash; result.attrs.insert_or_assign("narHash", narHash.to_string(HashFormat::SRI, true)); - // FIXME: we would like to mark inputs as final in - // getAccessorUnchecked(), but then we can't add - // narHash. Or maybe narHash should be excluded from the - // concept of "final" inputs? result.attrs.insert_or_assign("__final", Explicit(true)); assert(result.isFinal()); @@ -290,6 +281,8 @@ std::pair, Input> Input::getAccessor(ref store) const try { auto [accessor, result] = getAccessorUnchecked(store); + result.attrs.insert_or_assign("__final", Explicit(true)); + checkLocks(*this, result); return {accessor, std::move(result)}; diff --git a/src/libfetchers/fetchers.hh b/src/libfetchers/fetchers.hh index 644c267c1..01354a6e3 100644 --- a/src/libfetchers/fetchers.hh +++ b/src/libfetchers/fetchers.hh @@ -90,15 +90,6 @@ public: */ bool isLocked() const; - /** - * Return whether the input is either locked, or, if - * `allow-dirty-locks` is enabled, it has a NAR hash. 
In the - * latter case, we can verify the input but we may not be able to - * fetch it from anywhere. - */ - bool isConsideredLocked( - const Settings & settings) const; - /** * Only for relative path flakes, i.e. 'path:./foo', returns the * relative path, i.e. './foo'. @@ -273,6 +264,9 @@ struct InputScheme virtual std::optional isRelative(const Input & input) const { return std::nullopt; } + + virtual std::optional getAccessToken(const fetchers::Settings & settings, const std::string & host, const std::string & url) const + { return {};} }; void registerInputScheme(std::shared_ptr && fetcher); diff --git a/src/libfetchers/git-lfs-fetch.cc b/src/libfetchers/git-lfs-fetch.cc new file mode 100644 index 000000000..bd6c01435 --- /dev/null +++ b/src/libfetchers/git-lfs-fetch.cc @@ -0,0 +1,279 @@ +#include "git-lfs-fetch.hh" +#include "git-utils.hh" +#include "filetransfer.hh" +#include "processes.hh" +#include "url.hh" +#include "users.hh" +#include "hash.hh" + +#include +#include +#include +#include + +#include + +namespace nix::lfs { + +// if authHeader is "", downloadToSink assumes no auth is expected +static void downloadToSink( + const std::string & url, + const std::string & authHeader, + // FIXME: passing a StringSink is superfluous, we may as well + // return a string. Or use an abstract Sink for streaming. 
+ StringSink & sink, + std::string sha256Expected, + size_t sizeExpected) +{ + FileTransferRequest request(url); + Headers headers; + if (!authHeader.empty()) + headers.push_back({"Authorization", authHeader}); + request.headers = headers; + getFileTransfer()->download(std::move(request), sink); + + auto sizeActual = sink.s.length(); + if (sizeExpected != sizeActual) + throw Error("size mismatch while fetching %s: expected %d but got %d", url, sizeExpected, sizeActual); + + auto sha256Actual = hashString(HashAlgorithm::SHA256, sink.s).to_string(HashFormat::Base16, false); + if (sha256Actual != sha256Expected) + throw Error( + "hash mismatch while fetching %s: expected sha256:%s but got sha256:%s", url, sha256Expected, sha256Actual); +} + +static std::string getLfsApiToken(const ParsedURL & url) +{ + auto [status, output] = runProgram(RunOptions{ + .program = "ssh", + .args = {*url.authority, "git-lfs-authenticate", url.path, "download"}, + }); + + if (output.empty()) + throw Error( + "git-lfs-authenticate: no output (cmd: ssh %s git-lfs-authenticate %s download)", + url.authority.value_or(""), + url.path); + + auto queryResp = nlohmann::json::parse(output); + if (!queryResp.contains("header")) + throw Error("no header in git-lfs-authenticate response"); + if (!queryResp["header"].contains("Authorization")) + throw Error("no Authorization in git-lfs-authenticate response"); + + return queryResp["header"]["Authorization"].get(); +} + +typedef std::unique_ptr> GitConfig; +typedef std::unique_ptr> GitConfigEntry; + +static std::string getLfsEndpointUrl(git_repository * repo) +{ + GitConfig config; + if (git_repository_config(Setter(config), repo)) { + GitConfigEntry entry; + if (!git_config_get_entry(Setter(entry), config.get(), "lfs.url")) { + auto value = std::string(entry->value); + if (!value.empty()) { + debug("Found explicit lfs.url value: %s", value); + return value; + } + } + } + + git_remote * remote = nullptr; + if (git_remote_lookup(&remote, repo, "origin")) 
+ return ""; + + const char * url_c_str = git_remote_url(remote); + if (!url_c_str) + return ""; + + return std::string(url_c_str); +} + +static std::optional parseLfsPointer(std::string_view content, std::string_view filename) +{ + // https://github.com/git-lfs/git-lfs/blob/2ef4108/docs/spec.md + // + // example git-lfs pointer file: + // version https://git-lfs.github.com/spec/v1 + // oid sha256:f5e02aa71e67f41d79023a128ca35bad86cf7b6656967bfe0884b3a3c4325eaf + // size 10000000 + // (ending \n) + + if (!content.starts_with("version ")) { + // Invalid pointer file + return std::nullopt; + } + + if (!content.starts_with("version https://git-lfs.github.com/spec/v1")) { + // In case there's new spec versions in the future, but for now only v1 exists + debug("Invalid version found on potential lfs pointer file, skipping"); + return std::nullopt; + } + + std::string oid; + std::string size; + + for (auto & line : tokenizeString(content, "\n")) { + if (line.starts_with("version ")) { + continue; + } + if (line.starts_with("oid sha256:")) { + oid = line.substr(11); // skip "oid sha256:" + continue; + } + if (line.starts_with("size ")) { + size = line.substr(5); // skip "size " + continue; + } + + debug("Custom extension '%s' found, ignoring", line); + } + + if (oid.length() != 64 || !std::all_of(oid.begin(), oid.end(), ::isxdigit)) { + debug("Invalid sha256 %s, skipping", oid); + return std::nullopt; + } + + if (size.length() == 0 || !std::all_of(size.begin(), size.end(), ::isdigit)) { + debug("Invalid size %s, skipping", size); + return std::nullopt; + } + + return std::make_optional(Pointer{oid, std::stoul(size)}); +} + +Fetch::Fetch(git_repository * repo, git_oid rev) +{ + this->repo = repo; + this->rev = rev; + + const auto remoteUrl = lfs::getLfsEndpointUrl(repo); + + this->url = nix::parseURL(nix::fixGitURL(remoteUrl)).canonicalise(); +} + +bool Fetch::shouldFetch(const CanonPath & path) const +{ + const char * attr = nullptr; + git_attr_options opts = 
GIT_ATTR_OPTIONS_INIT; + opts.attr_commit_id = this->rev; + opts.flags = GIT_ATTR_CHECK_INCLUDE_COMMIT | GIT_ATTR_CHECK_NO_SYSTEM; + if (git_attr_get_ext(&attr, (git_repository *) (this->repo), &opts, path.rel_c_str(), "filter")) + throw Error("cannot get git-lfs attribute: %s", git_error_last()->message); + debug("Git filter for '%s' is '%s'", path, attr ? attr : "null"); + return attr != nullptr && !std::string(attr).compare("lfs"); +} + +static nlohmann::json pointerToPayload(const std::vector & items) +{ + nlohmann::json jArray = nlohmann::json::array(); + for (const auto & pointer : items) + jArray.push_back({{"oid", pointer.oid}, {"size", pointer.size}}); + return jArray; +} + +std::vector Fetch::fetchUrls(const std::vector & pointers) const +{ + ParsedURL httpUrl(url); + httpUrl.scheme = url.scheme == "ssh" ? "https" : url.scheme; + FileTransferRequest request(httpUrl.to_string() + "/info/lfs/objects/batch"); + request.post = true; + Headers headers; + if (this->url.scheme == "ssh") + headers.push_back({"Authorization", lfs::getLfsApiToken(this->url)}); + headers.push_back({"Content-Type", "application/vnd.git-lfs+json"}); + headers.push_back({"Accept", "application/vnd.git-lfs+json"}); + request.headers = headers; + nlohmann::json oidList = pointerToPayload(pointers); + nlohmann::json data = {{"operation", "download"}}; + data["objects"] = oidList; + request.data = data.dump(); + + FileTransferResult result = getFileTransfer()->upload(request); + auto responseString = result.data; + + std::vector objects; + // example resp here: + // {"objects":[{"oid":"f5e02aa71e67f41d79023a128ca35bad86cf7b6656967bfe0884b3a3c4325eaf","size":10000000,"actions":{"download":{"href":"https://gitlab.com/b-camacho/test-lfs.git/gitlab-lfs/objects/f5e02aa71e67f41d79023a128ca35bad86cf7b6656967bfe0884b3a3c4325eaf","header":{"Authorization":"Basic + // 
Yi1jYW1hY2hvOmV5SjBlWEFpT2lKS1YxUWlMQ0poYkdjaU9pSklVekkxTmlKOS5leUprWVhSaElqcDdJbUZqZEc5eUlqb2lZaTFqWVcxaFkyaHZJbjBzSW1wMGFTSTZJbUptTURZNFpXVTFMVEprWmpVdE5HWm1ZUzFpWWpRMExUSXpNVEV3WVRReU1qWmtaaUlzSW1saGRDSTZNVGN4TkRZeE16ZzBOU3dpYm1KbUlqb3hOekUwTmpFek9EUXdMQ0psZUhBaU9qRTNNVFEyTWpFd05EVjkuZk9yMDNkYjBWSTFXQzFZaTBKRmJUNnJTTHJPZlBwVW9lYllkT0NQZlJ4QQ=="}}},"authenticated":true}]} + + try { + auto resp = nlohmann::json::parse(responseString); + if (resp.contains("objects")) + objects.insert(objects.end(), resp["objects"].begin(), resp["objects"].end()); + else + throw Error("response does not contain 'objects'"); + + return objects; + } catch (const nlohmann::json::parse_error & e) { + printMsg(lvlTalkative, "Full response: '%1%'", responseString); + throw Error("response did not parse as json: %s", e.what()); + } +} + +void Fetch::fetch( + const std::string & content, + const CanonPath & pointerFilePath, + StringSink & sink, + std::function sizeCallback) const +{ + debug("trying to fetch '%s' using git-lfs", pointerFilePath); + + if (content.length() >= 1024) { + warn("encountered file '%s' that should have been a git-lfs pointer, but is too large", pointerFilePath); + sizeCallback(content.length()); + sink(content); + return; + } + + const auto pointer = parseLfsPointer(content, pointerFilePath.rel()); + if (pointer == std::nullopt) { + warn("encountered file '%s' that should have been a git-lfs pointer, but is invalid", pointerFilePath); + sizeCallback(content.length()); + sink(content); + return; + } + + Path cacheDir = getCacheDir() + "/git-lfs"; + std::string key = hashString(HashAlgorithm::SHA256, pointerFilePath.rel()).to_string(HashFormat::Base16, false) + + "/" + pointer->oid; + Path cachePath = cacheDir + "/" + key; + if (pathExists(cachePath)) { + debug("using cache entry %s -> %s", key, cachePath); + sink(readFile(cachePath)); + return; + } + debug("did not find cache entry for %s", key); + + std::vector pointers; + pointers.push_back(pointer.value()); + 
const auto objUrls = fetchUrls(pointers); + + const auto obj = objUrls[0]; + try { + std::string sha256 = obj.at("oid"); // oid is also the sha256 + std::string ourl = obj.at("actions").at("download").at("href"); + std::string authHeader = ""; + if (obj.at("actions").at("download").contains("header") + && obj.at("actions").at("download").at("header").contains("Authorization")) { + authHeader = obj["actions"]["download"]["header"]["Authorization"]; + } + const uint64_t size = obj.at("size"); + sizeCallback(size); + downloadToSink(ourl, authHeader, sink, sha256, size); + + debug("creating cache entry %s -> %s", key, cachePath); + if (!pathExists(dirOf(cachePath))) + createDirs(dirOf(cachePath)); + writeFile(cachePath, sink.s); + + debug("%s fetched with git-lfs", pointerFilePath); + } catch (const nlohmann::json::out_of_range & e) { + throw Error("bad json from /info/lfs/objects/batch: %s %s", obj, e.what()); + } +} + +} // namespace nix::lfs diff --git a/src/libfetchers/git-lfs-fetch.hh b/src/libfetchers/git-lfs-fetch.hh new file mode 100644 index 000000000..36df91962 --- /dev/null +++ b/src/libfetchers/git-lfs-fetch.hh @@ -0,0 +1,43 @@ +#include "canon-path.hh" +#include "serialise.hh" +#include "url.hh" + +#include + +#include + +namespace nix::lfs { + +/** + * git-lfs pointer + * @see https://github.com/git-lfs/git-lfs/blob/2ef4108/docs/spec.md + */ +struct Pointer +{ + std::string oid; // git-lfs managed object id. 
you give this to the lfs server + // for downloads + size_t size; // in bytes +}; + +struct Fetch +{ + // Reference to the repository + const git_repository * repo; + + // Git commit being fetched + git_oid rev; + + // derived from git remote url + nix::ParsedURL url; + + Fetch(git_repository * repo, git_oid rev); + bool shouldFetch(const CanonPath & path) const; + void fetch( + const std::string & content, + const CanonPath & pointerFilePath, + StringSink & sink, + std::function sizeCallback) const; + std::vector fetchUrls(const std::vector & pointers) const; +}; + +} // namespace nix::lfs diff --git a/src/libfetchers/git-utils.cc b/src/libfetchers/git-utils.cc index a6b13fb31..a2761a543 100644 --- a/src/libfetchers/git-utils.cc +++ b/src/libfetchers/git-utils.cc @@ -1,4 +1,5 @@ #include "git-utils.hh" +#include "git-lfs-fetch.hh" #include "cache.hh" #include "finally.hh" #include "processes.hh" @@ -60,14 +61,6 @@ namespace nix { struct GitSourceAccessor; -// Some wrapper types that ensure that the git_*_free functions get called. -template -struct Deleter -{ - template - void operator()(T * p) const { del(p); }; -}; - typedef std::unique_ptr> Repository; typedef std::unique_ptr> TreeEntry; typedef std::unique_ptr> Tree; @@ -85,20 +78,6 @@ typedef std::unique_ptr> ObjectDb; typedef std::unique_ptr> PackBuilder; typedef std::unique_ptr> Indexer; -// A helper to ensure that we don't leak objects returned by libgit2. -template -struct Setter -{ - T & t; - typename T::pointer p = nullptr; - - Setter(T & t) : t(t) { } - - ~Setter() { if (p) t = T(p); } - - operator typename T::pointer * () { return &p; } -}; - Hash toHash(const git_oid & oid) { #ifdef GIT_EXPERIMENTAL_SHA256 @@ -506,12 +485,15 @@ struct GitRepoImpl : GitRepo, std::enable_shared_from_this /** * A 'GitSourceAccessor' with no regard for export-ignore or any other transformations. 
*/ - ref getRawAccessor(const Hash & rev); + ref getRawAccessor( + const Hash & rev, + bool smudgeLfs = false); ref getAccessor( const Hash & rev, bool exportIgnore, - std::string displayPrefix) override; + std::string displayPrefix, + bool smudgeLfs = false) override; ref getAccessor(const WorkdirInfo & wd, bool exportIgnore, MakeNotAllowedError e) override; @@ -670,24 +652,40 @@ ref GitRepo::openRepo(const std::filesystem::path & path, bool create, /** * Raw git tree input accessor. */ + struct GitSourceAccessor : SourceAccessor { ref repo; Object root; + std::optional lfsFetch = std::nullopt; - GitSourceAccessor(ref repo_, const Hash & rev) + GitSourceAccessor(ref repo_, const Hash & rev, bool smudgeLfs) : repo(repo_) , root(peelToTreeOrBlob(lookupObject(*repo, hashToOID(rev)).get())) { + if (smudgeLfs) + lfsFetch = std::make_optional(lfs::Fetch(*repo, hashToOID(rev))); } std::string readBlob(const CanonPath & path, bool symlink) { - auto blob = getBlob(path, symlink); + const auto blob = getBlob(path, symlink); - auto data = std::string_view((const char *) git_blob_rawcontent(blob.get()), git_blob_rawsize(blob.get())); + if (lfsFetch) { + if (lfsFetch->shouldFetch(path)) { + StringSink s; + try { + auto contents = std::string((const char *) git_blob_rawcontent(blob.get()), git_blob_rawsize(blob.get())); + lfsFetch->fetch(contents, path, s, [&s](uint64_t size){ s.s.reserve(size); }); + } catch (Error & e) { + e.addTrace({}, "while smudging git-lfs file '%s'", path); + throw; + } + return s.s; + } + } - return std::string(data); + return std::string((const char *) git_blob_rawcontent(blob.get()), git_blob_rawsize(blob.get())); } std::string readFile(const CanonPath & path) override @@ -1191,19 +1189,22 @@ struct GitFileSystemObjectSinkImpl : GitFileSystemObjectSink } }; -ref GitRepoImpl::getRawAccessor(const Hash & rev) +ref GitRepoImpl::getRawAccessor( + const Hash & rev, + bool smudgeLfs) { auto self = ref(shared_from_this()); - return make_ref(self, rev); + 
return make_ref(self, rev, smudgeLfs); } ref GitRepoImpl::getAccessor( const Hash & rev, bool exportIgnore, - std::string displayPrefix) + std::string displayPrefix, + bool smudgeLfs) { auto self = ref(shared_from_this()); - ref rawGitAccessor = getRawAccessor(rev); + ref rawGitAccessor = getRawAccessor(rev, smudgeLfs); rawGitAccessor->setPathDisplay(std::move(displayPrefix)); if (exportIgnore) return make_ref(self, rawGitAccessor, rev); diff --git a/src/libfetchers/git-utils.hh b/src/libfetchers/git-utils.hh index 9677f5079..c683bd058 100644 --- a/src/libfetchers/git-utils.hh +++ b/src/libfetchers/git-utils.hh @@ -89,7 +89,8 @@ struct GitRepo virtual ref getAccessor( const Hash & rev, bool exportIgnore, - std::string displayPrefix) = 0; + std::string displayPrefix, + bool smudgeLfs = false) = 0; virtual ref getAccessor(const WorkdirInfo & wd, bool exportIgnore, MakeNotAllowedError makeNotAllowedError) = 0; @@ -126,4 +127,26 @@ struct GitRepo ref getTarballCache(); +// A helper to ensure that the `git_*_free` functions get called. +template +struct Deleter +{ + template + void operator()(T * p) const { del(p); }; +}; + +// A helper to ensure that we don't leak objects returned by libgit2. 
+template +struct Setter +{ + T & t; + typename T::pointer p = nullptr; + + Setter(T & t) : t(t) { } + + ~Setter() { if (p) t = T(p); } + + operator typename T::pointer * () { return &p; } +}; + } diff --git a/src/libfetchers/git.cc b/src/libfetchers/git.cc index 0d423a7a3..f46334d30 100644 --- a/src/libfetchers/git.cc +++ b/src/libfetchers/git.cc @@ -9,7 +9,6 @@ #include "pathlocks.hh" #include "processes.hh" #include "git.hh" -#include "mounted-source-accessor.hh" #include "git-utils.hh" #include "logging.hh" #include "finally.hh" @@ -185,7 +184,7 @@ struct GitInputScheme : InputScheme for (auto & [name, value] : url.query) { if (name == "rev" || name == "ref" || name == "keytype" || name == "publicKey" || name == "publicKeys") attrs.emplace(name, value); - else if (name == "shallow" || name == "submodules" || name == "exportIgnore" || name == "allRefs" || name == "verifyCommit") + else if (name == "shallow" || name == "submodules" || name == "lfs" || name == "exportIgnore" || name == "allRefs" || name == "verifyCommit") attrs.emplace(name, Explicit { value == "1" }); else url2.query.emplace(name, value); @@ -210,6 +209,7 @@ struct GitInputScheme : InputScheme "rev", "shallow", "submodules", + "lfs", "exportIgnore", "lastModified", "revCount", @@ -262,6 +262,8 @@ struct GitInputScheme : InputScheme if (auto ref = input.getRef()) url.query.insert_or_assign("ref", *ref); if (getShallowAttr(input)) url.query.insert_or_assign("shallow", "1"); + if (getLfsAttr(input)) + url.query.insert_or_assign("lfs", "1"); if (getSubmodulesAttr(input)) url.query.insert_or_assign("submodules", "1"); if (maybeGetBoolAttr(input.attrs, "exportIgnore").value_or(false)) @@ -349,8 +351,7 @@ struct GitInputScheme : InputScheme if (commitMsg) { // Pause the logger to allow for user input (such as a gpg passphrase) in `git commit` - logger->pause(); - Finally restoreLogger([]() { logger->resume(); }); + auto suspension = logger->suspend(); runProgram("git", true, { "-C", repoPath->string(), 
"--git-dir", repoInfo.gitDir, "commit", std::string(path.rel()), "-F", "-" }, *commitMsg); @@ -411,6 +412,11 @@ struct GitInputScheme : InputScheme return maybeGetBoolAttr(input.attrs, "submodules").value_or(false); } + bool getLfsAttr(const Input & input) const + { + return maybeGetBoolAttr(input.attrs, "lfs").value_or(false); + } + bool getExportIgnoreAttr(const Input & input) const { return maybeGetBoolAttr(input.attrs, "exportIgnore").value_or(false); @@ -678,7 +684,8 @@ struct GitInputScheme : InputScheme verifyCommit(input, repo); bool exportIgnore = getExportIgnoreAttr(input); - auto accessor = repo->getAccessor(rev, exportIgnore, "«" + input.to_string() + "»"); + bool smudgeLfs = getLfsAttr(input); + auto accessor = repo->getAccessor(rev, exportIgnore, "«" + input.to_string() + "»", smudgeLfs); /* If the repo has submodules, fetch them and return a mounted input accessor consisting of the accessor for the top-level @@ -698,6 +705,7 @@ struct GitInputScheme : InputScheme attrs.insert_or_assign("rev", submoduleRev.gitRev()); attrs.insert_or_assign("exportIgnore", Explicit{ exportIgnore }); attrs.insert_or_assign("submodules", Explicit{ true }); + attrs.insert_or_assign("lfs", Explicit{ smudgeLfs }); attrs.insert_or_assign("allRefs", Explicit{ true }); auto submoduleInput = fetchers::Input::fromAttrs(*input.settings, std::move(attrs)); auto [submoduleAccessor, submoduleInput2] = @@ -838,7 +846,7 @@ struct GitInputScheme : InputScheme { auto makeFingerprint = [&](const Hash & rev) { - return rev.gitRev() + (getSubmodulesAttr(input) ? ";s" : "") + (getExportIgnoreAttr(input) ? ";e" : ""); + return rev.gitRev() + (getSubmodulesAttr(input) ? ";s" : "") + (getExportIgnoreAttr(input) ? ";e" : "") + (getLfsAttr(input) ? 
";l" : ""); }; if (auto rev = input.getRev()) diff --git a/src/libfetchers/github.cc b/src/libfetchers/github.cc index 24a4f14af..5cea22eea 100644 --- a/src/libfetchers/github.cc +++ b/src/libfetchers/github.cc @@ -172,9 +172,30 @@ struct GitArchiveInputScheme : InputScheme return input; } - std::optional getAccessToken(const fetchers::Settings & settings, const std::string & host) const + // Search for the longest possible match starting from the begining and ending at either the end or a path segment. + std::optional getAccessToken(const fetchers::Settings & settings, const std::string & host, const std::string & url) const override { auto tokens = settings.accessTokens.get(); + std::string answer; + size_t answer_match_len = 0; + if(! url.empty()) { + for (auto & token : tokens) { + auto first = url.find(token.first); + if ( + first != std::string::npos + && token.first.length() > answer_match_len + && first == 0 + && url.substr(0,token.first.length()) == token.first + && (url.length() == token.first.length() || url[token.first.length()] == '/') + ) + { + answer = token.second; + answer_match_len = token.first.length(); + } + } + if (!answer.empty()) + return answer; + } if (auto token = get(tokens, host)) return *token; return {}; @@ -182,10 +203,22 @@ struct GitArchiveInputScheme : InputScheme Headers makeHeadersWithAuthTokens( const fetchers::Settings & settings, - const std::string & host) const + const std::string & host, + const Input & input) const + { + auto owner = getStrAttr(input.attrs, "owner"); + auto repo = getStrAttr(input.attrs, "repo"); + auto hostAndPath = fmt( "%s/%s/%s", host, owner, repo); + return makeHeadersWithAuthTokens(settings, host, hostAndPath); + } + + Headers makeHeadersWithAuthTokens( + const fetchers::Settings & settings, + const std::string & host, + const std::string & hostAndPath) const { Headers headers; - auto accessToken = getAccessToken(settings, host); + auto accessToken = getAccessToken(settings, host, hostAndPath); if 
(accessToken) { auto hdr = accessHeaderFromToken(*accessToken); if (hdr) @@ -361,7 +394,7 @@ struct GitHubInputScheme : GitArchiveInputScheme : "https://%s/api/v3/repos/%s/%s/commits/%s", host, getOwner(input), getRepo(input), *input.getRef()); - Headers headers = makeHeadersWithAuthTokens(*input.settings, host); + Headers headers = makeHeadersWithAuthTokens(*input.settings, host, input); auto json = nlohmann::json::parse( readFile( @@ -378,7 +411,7 @@ struct GitHubInputScheme : GitArchiveInputScheme { auto host = getHost(input); - Headers headers = makeHeadersWithAuthTokens(*input.settings, host); + Headers headers = makeHeadersWithAuthTokens(*input.settings, host, input); // If we have no auth headers then we default to the public archive // urls so we do not run into rate limits. @@ -435,7 +468,7 @@ struct GitLabInputScheme : GitArchiveInputScheme auto url = fmt("https://%s/api/v4/projects/%s%%2F%s/repository/commits?ref_name=%s", host, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo"), *input.getRef()); - Headers headers = makeHeadersWithAuthTokens(*input.settings, host); + Headers headers = makeHeadersWithAuthTokens(*input.settings, host, input); auto json = nlohmann::json::parse( readFile( @@ -465,7 +498,7 @@ struct GitLabInputScheme : GitArchiveInputScheme host, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo"), input.getRev()->to_string(HashFormat::Base16, false)); - Headers headers = makeHeadersWithAuthTokens(*input.settings, host); + Headers headers = makeHeadersWithAuthTokens(*input.settings, host, input); return DownloadUrl { url, headers }; } @@ -505,7 +538,7 @@ struct SourceHutInputScheme : GitArchiveInputScheme auto base_url = fmt("https://%s/%s/%s", host, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo")); - Headers headers = makeHeadersWithAuthTokens(*input.settings, host); + Headers headers = makeHeadersWithAuthTokens(*input.settings, host, input); std::string refUri; if (ref == "HEAD") { @@ -552,7 
+585,7 @@ struct SourceHutInputScheme : GitArchiveInputScheme host, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo"), input.getRev()->to_string(HashFormat::Base16, false)); - Headers headers = makeHeadersWithAuthTokens(*input.settings, host); + Headers headers = makeHeadersWithAuthTokens(*input.settings, host, input); return DownloadUrl { url, headers }; } diff --git a/src/libfetchers/meson.build b/src/libfetchers/meson.build index 58afbb7d0..725254b56 100644 --- a/src/libfetchers/meson.build +++ b/src/libfetchers/meson.build @@ -14,7 +14,7 @@ cxx = meson.get_compiler('cpp') subdir('nix-meson-build-support/deps-lists') -configdata = configuration_data() +configuration_data() deps_private_maybe_subproject = [ ] @@ -48,12 +48,12 @@ sources = files( 'fetch-to-store.cc', 'fetchers.cc', 'filtering-source-accessor.cc', + 'git-lfs-fetch.cc', 'git-utils.cc', 'git.cc', 'github.cc', 'indirect.cc', 'mercurial.cc', - 'mounted-source-accessor.cc', 'path.cc', 'registry.cc', 'store-path-accessor.cc', @@ -69,8 +69,8 @@ headers = files( 'fetch-to-store.hh', 'fetchers.hh', 'filtering-source-accessor.hh', + 'git-lfs-fetch.hh', 'git-utils.hh', - 'mounted-source-accessor.hh', 'registry.hh', 'store-path-accessor.hh', 'tarball.hh', diff --git a/src/libfetchers/mounted-source-accessor.hh b/src/libfetchers/mounted-source-accessor.hh deleted file mode 100644 index 45cbcb09a..000000000 --- a/src/libfetchers/mounted-source-accessor.hh +++ /dev/null @@ -1,9 +0,0 @@ -#pragma once - -#include "source-accessor.hh" - -namespace nix { - -ref makeMountedSourceAccessor(std::map> mounts); - -} diff --git a/src/libfetchers/path.cc b/src/libfetchers/path.cc index 37d626d58..e3f2afd8a 100644 --- a/src/libfetchers/path.cc +++ b/src/libfetchers/path.cc @@ -125,7 +125,7 @@ struct PathInputScheme : InputScheme auto absPath = getAbsPath(input); - Activity act(*logger, lvlTalkative, actUnknown, fmt("copying '%s' to the store", absPath)); + Activity act(*logger, lvlTalkative, actUnknown, 
fmt("copying %s to the store", absPath)); // FIXME: check whether access to 'path' is allowed. auto storePath = store->maybeParseStorePath(absPath.string()); diff --git a/src/libflake/flake/flake.cc b/src/libflake/flake/flake.cc index ccfcd0390..b678d5b64 100644 --- a/src/libflake/flake/flake.cc +++ b/src/libflake/flake/flake.cc @@ -12,6 +12,7 @@ #include "flake/settings.hh" #include "value-to-json.hh" #include "local-fs-store.hh" +#include "fetch-to-store.hh" #include @@ -24,7 +25,7 @@ namespace flake { struct FetchedFlake { FlakeRef lockedRef; - StorePath storePath; + ref accessor; }; typedef std::map FlakeCache; @@ -40,7 +41,7 @@ static std::optional lookupInFlakeCache( return i->second; } -static std::tuple fetchOrSubstituteTree( +static std::tuple, FlakeRef, FlakeRef> fetchOrSubstituteTree( EvalState & state, const FlakeRef & originalRef, bool useRegistries, @@ -51,8 +52,8 @@ static std::tuple fetchOrSubstituteTree( if (!fetched) { if (originalRef.input.isDirect()) { - auto [storePath, lockedRef] = originalRef.fetchTree(state.store); - fetched.emplace(FetchedFlake{.lockedRef = lockedRef, .storePath = storePath}); + auto [accessor, lockedRef] = originalRef.lazyFetch(state.store); + fetched.emplace(FetchedFlake{.lockedRef = lockedRef, .accessor = accessor}); } else { if (useRegistries) { resolvedRef = originalRef.resolve( @@ -64,8 +65,8 @@ static std::tuple fetchOrSubstituteTree( }); fetched = lookupInFlakeCache(flakeCache, originalRef); if (!fetched) { - auto [storePath, lockedRef] = resolvedRef.fetchTree(state.store); - fetched.emplace(FetchedFlake{.lockedRef = lockedRef, .storePath = storePath}); + auto [accessor, lockedRef] = resolvedRef.lazyFetch(state.store); + fetched.emplace(FetchedFlake{.lockedRef = lockedRef, .accessor = accessor}); } flakeCache.insert_or_assign(resolvedRef, *fetched); } @@ -76,14 +77,27 @@ static std::tuple fetchOrSubstituteTree( flakeCache.insert_or_assign(originalRef, *fetched); } - debug("got tree '%s' from '%s'", - 
state.store->printStorePath(fetched->storePath), fetched->lockedRef); + debug("got tree '%s' from '%s'", fetched->accessor, fetched->lockedRef); - state.allowPath(fetched->storePath); + return {fetched->accessor, resolvedRef, fetched->lockedRef}; +} - assert(!originalRef.input.getNarHash() || fetched->storePath == originalRef.input.computeStorePath(*state.store)); +static StorePath copyInputToStore( + EvalState & state, + fetchers::Input & input, + const fetchers::Input & originalInput, + ref accessor) +{ + auto storePath = fetchToStore(*state.store, accessor, FetchMode::Copy, input.getName()); - return {fetched->storePath, resolvedRef, fetched->lockedRef}; + state.allowPath(storePath); + + auto narHash = state.store->queryPathInfo(storePath)->narHash; + input.attrs.insert_or_assign("narHash", narHash.to_string(HashFormat::SRI, true)); + + assert(!originalInput.getNarHash() || storePath == originalInput.computeStorePath(*state.store)); + + return storePath; } static void forceTrivialValue(EvalState & state, Value & value, const PosIdx pos) @@ -101,19 +115,54 @@ static void expectType(EvalState & state, ValueType type, showType(type), showType(value.type()), state.positions[pos]); } -static std::map parseFlakeInputs( +static std::pair, fetchers::Attrs> parseFlakeInputs( EvalState & state, Value * value, const PosIdx pos, - const InputPath & lockRootPath, - const SourcePath & flakeDir); + const InputAttrPath & lockRootAttrPath, + const SourcePath & flakeDir, + bool allowSelf); + +static void parseFlakeInputAttr( + EvalState & state, + const Attr & attr, + fetchers::Attrs & attrs) +{ + // Allow selecting a subset of enum values + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wswitch-enum" + switch (attr.value->type()) { + case nString: + attrs.emplace(state.symbols[attr.name], attr.value->c_str()); + break; + case nBool: + attrs.emplace(state.symbols[attr.name], Explicit { attr.value->boolean() }); + break; + case nInt: { + auto intValue = 
attr.value->integer().value; + if (intValue < 0) + state.error("negative value given for flake input attribute %1%: %2%", state.symbols[attr.name], intValue).debugThrow(); + attrs.emplace(state.symbols[attr.name], uint64_t(intValue)); + break; + } + default: + if (attr.name == state.symbols.create("publicKeys")) { + experimentalFeatureSettings.require(Xp::VerifiedFetches); + NixStringContext emptyContext = {}; + attrs.emplace(state.symbols[attr.name], printValueAsJSON(state, true, *attr.value, attr.pos, emptyContext).dump()); + } else + state.error("flake input attribute '%s' is %s while a string, Boolean, or integer is expected", + state.symbols[attr.name], showType(*attr.value)).debugThrow(); + } + #pragma GCC diagnostic pop +} static FlakeInput parseFlakeInput( EvalState & state, std::string_view inputName, Value * value, const PosIdx pos, - const InputPath & lockRootPath, + const InputAttrPath & lockRootAttrPath, const SourcePath & flakeDir) { expectType(state, nAttrs, *value, pos); @@ -137,7 +186,7 @@ static FlakeInput parseFlakeInput( else if (attr.value->type() == nPath) { auto path = attr.value->path(); if (path.accessor != flakeDir.accessor) - throw Error("input path '%s' at %s must be in the same source tree as %s", + throw Error("input attribute path '%s' at %s must be in the same source tree as %s", path, state.positions[attr.pos], flakeDir); url = "path:" + flakeDir.path.makeRelative(path.path); } @@ -149,44 +198,14 @@ static FlakeInput parseFlakeInput( expectType(state, nBool, *attr.value, attr.pos); input.isFlake = attr.value->boolean(); } else if (attr.name == sInputs) { - input.overrides = parseFlakeInputs(state, attr.value, attr.pos, lockRootPath, flakeDir); + input.overrides = parseFlakeInputs(state, attr.value, attr.pos, lockRootAttrPath, flakeDir, false).first; } else if (attr.name == sFollows) { expectType(state, nString, *attr.value, attr.pos); - auto follows(parseInputPath(attr.value->c_str())); - follows.insert(follows.begin(), 
lockRootPath.begin(), lockRootPath.end()); + auto follows(parseInputAttrPath(attr.value->c_str())); + follows.insert(follows.begin(), lockRootAttrPath.begin(), lockRootAttrPath.end()); input.follows = follows; - } else { - // Allow selecting a subset of enum values - #pragma GCC diagnostic push - #pragma GCC diagnostic ignored "-Wswitch-enum" - switch (attr.value->type()) { - case nString: - attrs.emplace(state.symbols[attr.name], attr.value->c_str()); - break; - case nBool: - attrs.emplace(state.symbols[attr.name], Explicit { attr.value->boolean() }); - break; - case nInt: { - auto intValue = attr.value->integer().value; - - if (intValue < 0) { - state.error("negative value given for flake input attribute %1%: %2%", state.symbols[attr.name], intValue).debugThrow(); - } - - attrs.emplace(state.symbols[attr.name], uint64_t(intValue)); - break; - } - default: - if (attr.name == state.symbols.create("publicKeys")) { - experimentalFeatureSettings.require(Xp::VerifiedFetches); - NixStringContext emptyContext = {}; - attrs.emplace(state.symbols[attr.name], printValueAsJSON(state, true, *attr.value, pos, emptyContext).dump()); - } else - state.error("flake input attribute '%s' is %s while a string, Boolean, or integer is expected", - state.symbols[attr.name], showType(*attr.value)).debugThrow(); - } - #pragma GCC diagnostic pop - } + } else + parseFlakeInputAttr(state, attr, attrs); } catch (Error & e) { e.addTrace( state.positions[attr.pos], @@ -216,28 +235,39 @@ static FlakeInput parseFlakeInput( return input; } -static std::map parseFlakeInputs( +static std::pair, fetchers::Attrs> parseFlakeInputs( EvalState & state, Value * value, const PosIdx pos, - const InputPath & lockRootPath, - const SourcePath & flakeDir) + const InputAttrPath & lockRootAttrPath, + const SourcePath & flakeDir, + bool allowSelf) { std::map inputs; + fetchers::Attrs selfAttrs; expectType(state, nAttrs, *value, pos); for (auto & inputAttr : *value->attrs()) { - 
inputs.emplace(state.symbols[inputAttr.name], - parseFlakeInput(state, - state.symbols[inputAttr.name], - inputAttr.value, - inputAttr.pos, - lockRootPath, - flakeDir)); + auto inputName = state.symbols[inputAttr.name]; + if (inputName == "self") { + if (!allowSelf) + throw Error("'self' input attribute not allowed at %s", state.positions[inputAttr.pos]); + expectType(state, nAttrs, *inputAttr.value, inputAttr.pos); + for (auto & attr : *inputAttr.value->attrs()) + parseFlakeInputAttr(state, attr, selfAttrs); + } else { + inputs.emplace(inputName, + parseFlakeInput(state, + inputName, + inputAttr.value, + inputAttr.pos, + lockRootAttrPath, + flakeDir)); + } } - return inputs; + return {inputs, selfAttrs}; } static Flake readFlake( @@ -246,7 +276,7 @@ static Flake readFlake( const FlakeRef & resolvedRef, const FlakeRef & lockedRef, const SourcePath & rootDir, - const InputPath & lockRootPath) + const InputAttrPath & lockRootAttrPath) { auto flakeDir = rootDir / CanonPath(resolvedRef.subdir); auto flakePath = flakeDir / "flake.nix"; @@ -269,8 +299,11 @@ static Flake readFlake( auto sInputs = state.symbols.create("inputs"); - if (auto inputs = vInfo.attrs()->get(sInputs)) - flake.inputs = parseFlakeInputs(state, inputs->value, inputs->pos, lockRootPath, flakeDir); + if (auto inputs = vInfo.attrs()->get(sInputs)) { + auto [flakeInputs, selfAttrs] = parseFlakeInputs(state, inputs->value, inputs->pos, lockRootAttrPath, flakeDir, true); + flake.inputs = std::move(flakeInputs); + flake.selfAttrs = std::move(selfAttrs); + } auto sOutputs = state.symbols.create("outputs"); @@ -301,10 +334,10 @@ static Flake readFlake( state.symbols[setting.name], std::string(state.forceStringNoCtx(*setting.value, setting.pos, ""))); else if (setting.value->type() == nPath) { - NixStringContext emptyContext = {}; + auto storePath = fetchToStore(*state.store, setting.value->path(), FetchMode::Copy); flake.config.settings.emplace( state.symbols[setting.name], - state.coerceToString(setting.pos, 
*setting.value, emptyContext, "", false, true, true).toOwned()); + state.store->printStorePath(storePath)); } else if (setting.value->type() == nInt) flake.config.settings.emplace( @@ -342,17 +375,55 @@ static Flake readFlake( return flake; } +static FlakeRef applySelfAttrs( + const FlakeRef & ref, + const Flake & flake) +{ + auto newRef(ref); + + std::set allowedAttrs{"submodules", "lfs"}; + + for (auto & attr : flake.selfAttrs) { + if (!allowedAttrs.contains(attr.first)) + throw Error("flake 'self' attribute '%s' is not supported", attr.first); + newRef.input.attrs.insert_or_assign(attr.first, attr.second); + } + + return newRef; +} + static Flake getFlake( EvalState & state, const FlakeRef & originalRef, bool useRegistries, FlakeCache & flakeCache, - const InputPath & lockRootPath) + const InputAttrPath & lockRootAttrPath) { - auto [storePath, resolvedRef, lockedRef] = fetchOrSubstituteTree( + // Fetch a lazy tree first. + auto [accessor, resolvedRef, lockedRef] = fetchOrSubstituteTree( state, originalRef, useRegistries, flakeCache); - return readFlake(state, originalRef, resolvedRef, lockedRef, state.rootPath(state.store->toRealPath(storePath)), lockRootPath); + // Parse/eval flake.nix to get at the input.self attributes. + auto flake = readFlake(state, originalRef, resolvedRef, lockedRef, {accessor}, lockRootAttrPath); + + // Re-fetch the tree if necessary. + auto newLockedRef = applySelfAttrs(lockedRef, flake); + + if (lockedRef != newLockedRef) { + debug("refetching input '%s' due to self attribute", newLockedRef); + // FIXME: need to remove attrs that are invalidated by the changed input attrs, such as 'narHash'. + newLockedRef.input.attrs.erase("narHash"); + auto [accessor2, resolvedRef2, lockedRef2] = fetchOrSubstituteTree( + state, newLockedRef, false, flakeCache); + accessor = accessor2; + lockedRef = lockedRef2; + } + + // Copy the tree to the store. 
+ auto storePath = copyInputToStore(state, lockedRef.input, originalRef.input, accessor); + + // Re-parse flake.nix from the store. + return readFlake(state, originalRef, resolvedRef, lockedRef, state.storePath(storePath), lockRootAttrPath); } Flake getFlake(EvalState & state, const FlakeRef & originalRef, bool useRegistries) @@ -405,12 +476,12 @@ LockedFlake lockFlake( { FlakeInput input; SourcePath sourcePath; - std::optional parentInputPath; // FIXME: rename to inputPathPrefix? + std::optional parentInputAttrPath; // FIXME: rename to inputAttrPathPrefix? }; - std::map overrides; - std::set explicitCliOverrides; - std::set overridesUsed, updatesUsed; + std::map overrides; + std::set explicitCliOverrides; + std::set overridesUsed, updatesUsed; std::map, SourcePath> nodePaths; for (auto & i : lockFlags.inputOverrides) { @@ -434,9 +505,9 @@ LockedFlake lockFlake( std::function node, - const InputPath & inputPathPrefix, + const InputAttrPath & inputAttrPathPrefix, std::shared_ptr oldNode, - const InputPath & followsPrefix, + const InputAttrPath & followsPrefix, const SourcePath & sourcePath, bool trustLock)> computeLocks; @@ -448,7 +519,7 @@ LockedFlake lockFlake( /* The node whose locks are to be updated.*/ ref node, /* The path to this node in the lock file graph. */ - const InputPath & inputPathPrefix, + const InputAttrPath & inputAttrPathPrefix, /* The old node, if any, from which locks can be copied. */ std::shared_ptr oldNode, @@ -456,59 +527,59 @@ LockedFlake lockFlake( interpreted. When a node is initially locked, it's relative to the node's flake; when it's already locked, it's relative to the root of the lock file. */ - const InputPath & followsPrefix, + const InputAttrPath & followsPrefix, /* The source path of this node's flake. 
*/ const SourcePath & sourcePath, bool trustLock) { - debug("computing lock file node '%s'", printInputPath(inputPathPrefix)); + debug("computing lock file node '%s'", printInputAttrPath(inputAttrPathPrefix)); /* Get the overrides (i.e. attributes of the form 'inputs.nixops.inputs.nixpkgs.url = ...'). */ for (auto & [id, input] : flakeInputs) { for (auto & [idOverride, inputOverride] : input.overrides) { - auto inputPath(inputPathPrefix); - inputPath.push_back(id); - inputPath.push_back(idOverride); - overrides.emplace(inputPath, + auto inputAttrPath(inputAttrPathPrefix); + inputAttrPath.push_back(id); + inputAttrPath.push_back(idOverride); + overrides.emplace(inputAttrPath, OverrideTarget { .input = inputOverride, .sourcePath = sourcePath, - .parentInputPath = inputPathPrefix + .parentInputAttrPath = inputAttrPathPrefix }); } } /* Check whether this input has overrides for a non-existent input. */ - for (auto [inputPath, inputOverride] : overrides) { - auto inputPath2(inputPath); - auto follow = inputPath2.back(); - inputPath2.pop_back(); - if (inputPath2 == inputPathPrefix && !flakeInputs.count(follow)) + for (auto [inputAttrPath, inputOverride] : overrides) { + auto inputAttrPath2(inputAttrPath); + auto follow = inputAttrPath2.back(); + inputAttrPath2.pop_back(); + if (inputAttrPath2 == inputAttrPathPrefix && !flakeInputs.count(follow)) warn( "input '%s' has an override for a non-existent input '%s'", - printInputPath(inputPathPrefix), follow); + printInputAttrPath(inputAttrPathPrefix), follow); } /* Go over the flake inputs, resolve/fetch them if necessary (i.e. if they're new or the flakeref changed from what's in the lock file). 
*/ for (auto & [id, input2] : flakeInputs) { - auto inputPath(inputPathPrefix); - inputPath.push_back(id); - auto inputPathS = printInputPath(inputPath); - debug("computing input '%s'", inputPathS); + auto inputAttrPath(inputAttrPathPrefix); + inputAttrPath.push_back(id); + auto inputAttrPathS = printInputAttrPath(inputAttrPath); + debug("computing input '%s'", inputAttrPathS); try { /* Do we have an override for this input from one of the ancestors? */ - auto i = overrides.find(inputPath); + auto i = overrides.find(inputAttrPath); bool hasOverride = i != overrides.end(); - bool hasCliOverride = explicitCliOverrides.contains(inputPath); + bool hasCliOverride = explicitCliOverrides.contains(inputAttrPath); if (hasOverride) - overridesUsed.insert(inputPath); + overridesUsed.insert(inputAttrPath); auto input = hasOverride ? i->second.input : input2; /* Resolve relative 'path:' inputs relative to @@ -523,11 +594,11 @@ LockedFlake lockFlake( /* Resolve 'follows' later (since it may refer to an input path we haven't processed yet. */ if (input.follows) { - InputPath target; + InputAttrPath target; target.insert(target.end(), input.follows->begin(), input.follows->end()); - debug("input '%s' follows '%s'", inputPathS, printInputPath(target)); + debug("input '%s' follows '%s'", inputAttrPathS, printInputAttrPath(target)); node->inputs.insert_or_assign(id, target); continue; } @@ -536,7 +607,7 @@ LockedFlake lockFlake( auto overridenParentPath = input.ref->input.isRelative() - ? std::optional(hasOverride ? i->second.parentInputPath : inputPathPrefix) + ? std::optional(hasOverride ? 
i->second.parentInputAttrPath : inputAttrPathPrefix) : std::nullopt; auto resolveRelativePath = [&]() -> std::optional @@ -555,9 +626,9 @@ LockedFlake lockFlake( auto getInputFlake = [&](const FlakeRef & ref) { if (auto resolvedPath = resolveRelativePath()) { - return readFlake(state, ref, ref, ref, *resolvedPath, inputPath); + return readFlake(state, ref, ref, ref, *resolvedPath, inputAttrPath); } else { - return getFlake(state, ref, useRegistries, flakeCache, inputPath); + return getFlake(state, ref, useRegistries, flakeCache, inputAttrPath); } }; @@ -565,19 +636,19 @@ LockedFlake lockFlake( And the input is not in updateInputs? */ std::shared_ptr oldLock; - updatesUsed.insert(inputPath); + updatesUsed.insert(inputAttrPath); - if (oldNode && !lockFlags.inputUpdates.count(inputPath)) + if (oldNode && !lockFlags.inputUpdates.count(inputAttrPath)) if (auto oldLock2 = get(oldNode->inputs, id)) if (auto oldLock3 = std::get_if<0>(&*oldLock2)) oldLock = *oldLock3; if (oldLock && oldLock->originalRef == *input.ref - && oldLock->parentPath == overridenParentPath + && oldLock->parentInputAttrPath == overridenParentPath && !hasCliOverride) { - debug("keeping existing input '%s'", inputPathS); + debug("keeping existing input '%s'", inputAttrPathS); /* Copy the input from the old lock since its flakeref didn't change and there is no override from a @@ -586,18 +657,18 @@ LockedFlake lockFlake( oldLock->lockedRef, oldLock->originalRef, oldLock->isFlake, - oldLock->parentPath); + oldLock->parentInputAttrPath); node->inputs.insert_or_assign(id, childNode); /* If we have this input in updateInputs, then we must fetch the flake to update it. 
*/ - auto lb = lockFlags.inputUpdates.lower_bound(inputPath); + auto lb = lockFlags.inputUpdates.lower_bound(inputAttrPath); auto mustRefetch = lb != lockFlags.inputUpdates.end() - && lb->size() > inputPath.size() - && std::equal(inputPath.begin(), inputPath.end(), lb->begin()); + && lb->size() > inputAttrPath.size() + && std::equal(inputAttrPath.begin(), inputAttrPath.end(), lb->begin()); FlakeInputs fakeInputs; @@ -616,7 +687,7 @@ LockedFlake lockFlake( if (!trustLock) { // It is possible that the flake has changed, // so we must confirm all the follows that are in the lock file are also in the flake. - auto overridePath(inputPath); + auto overridePath(inputAttrPath); overridePath.push_back(i.first); auto o = overrides.find(overridePath); // If the override disappeared, we have to refetch the flake, @@ -640,21 +711,21 @@ LockedFlake lockFlake( if (mustRefetch) { auto inputFlake = getInputFlake(oldLock->lockedRef); nodePaths.emplace(childNode, inputFlake.path.parent()); - computeLocks(inputFlake.inputs, childNode, inputPath, oldLock, followsPrefix, + computeLocks(inputFlake.inputs, childNode, inputAttrPath, oldLock, followsPrefix, inputFlake.path, false); } else { - computeLocks(fakeInputs, childNode, inputPath, oldLock, followsPrefix, sourcePath, true); + computeLocks(fakeInputs, childNode, inputAttrPath, oldLock, followsPrefix, sourcePath, true); } } else { /* We need to create a new lock file entry. So fetch this input. */ - debug("creating new input '%s'", inputPathS); + debug("creating new input '%s'", inputAttrPathS); if (!lockFlags.allowUnlocked && !input.ref->input.isLocked() && !input.ref->input.isRelative()) - throw Error("cannot update unlocked flake input '%s' in pure mode", inputPathS); + throw Error("cannot update unlocked flake input '%s' in pure mode", inputAttrPathS); /* Note: in case of an --override-input, we use the *original* ref (input2.ref) for the @@ -663,7 +734,7 @@ LockedFlake lockFlake( nuked the next time we update the lock file. 
That is, overrides are sticky unless you use --no-write-lock-file. */ - auto ref = (input2.ref && explicitCliOverrides.contains(inputPath)) ? *input2.ref : *input.ref; + auto ref = (input2.ref && explicitCliOverrides.contains(inputAttrPath)) ? *input2.ref : *input.ref; if (input.isFlake) { auto inputFlake = getInputFlake(*input.ref); @@ -689,11 +760,11 @@ LockedFlake lockFlake( own lock file. */ nodePaths.emplace(childNode, inputFlake.path.parent()); computeLocks( - inputFlake.inputs, childNode, inputPath, + inputFlake.inputs, childNode, inputAttrPath, oldLock ? std::dynamic_pointer_cast(oldLock) : readLockFile(state.fetchSettings, inputFlake.lockFilePath()).root.get_ptr(), - oldLock ? followsPrefix : inputPath, + oldLock ? followsPrefix : inputAttrPath, inputFlake.path, false); } @@ -705,9 +776,13 @@ LockedFlake lockFlake( if (auto resolvedPath = resolveRelativePath()) { return {*resolvedPath, *input.ref}; } else { - auto [storePath, resolvedRef, lockedRef] = fetchOrSubstituteTree( + auto [accessor, resolvedRef, lockedRef] = fetchOrSubstituteTree( state, *input.ref, useRegistries, flakeCache); - return {state.rootPath(state.store->toRealPath(storePath)), lockedRef}; + + // FIXME: allow input to be lazy. 
+ auto storePath = copyInputToStore(state, lockedRef.input, input.ref->input, accessor); + + return {state.storePath(storePath), lockedRef}; } }(); @@ -720,7 +795,7 @@ LockedFlake lockFlake( } } catch (Error & e) { - e.addTrace({}, "while updating the flake input '%s'", inputPathS); + e.addTrace({}, "while updating the flake input '%s'", inputAttrPathS); throw; } } @@ -740,11 +815,11 @@ LockedFlake lockFlake( for (auto & i : lockFlags.inputOverrides) if (!overridesUsed.count(i.first)) warn("the flag '--override-input %s %s' does not match any input", - printInputPath(i.first), i.second); + printInputAttrPath(i.first), i.second); for (auto & i : lockFlags.inputUpdates) if (!updatesUsed.count(i)) - warn("'%s' does not match any input of this flake", printInputPath(i)); + warn("'%s' does not match any input of this flake", printInputAttrPath(i)); /* Check 'follows' inputs. */ newLockFile.check(); @@ -844,21 +919,6 @@ LockedFlake lockFlake( } } -std::pair sourcePathToStorePath( - ref store, - const SourcePath & _path) -{ - auto path = _path.path.abs(); - - if (auto store2 = store.dynamic_pointer_cast()) { - auto realStoreDir = store2->getRealStoreDir(); - if (isInDir(path, realStoreDir)) - path = store2->storeDir + path.substr(realStoreDir.size()); - } - - return store->toStorePath(path); -} - void callFlake(EvalState & state, const LockedFlake & lockedFlake, Value & vRes) @@ -874,7 +934,7 @@ void callFlake(EvalState & state, auto lockedNode = node.dynamic_pointer_cast(); - auto [storePath, subdir] = sourcePathToStorePath(state.store, sourcePath); + auto [storePath, subdir] = state.store->toStorePath(sourcePath.path.abs()); emitTreeAttrs( state, diff --git a/src/libflake/flake/flake.hh b/src/libflake/flake/flake.hh index 9ab661fce..d8cd9aac0 100644 --- a/src/libflake/flake/flake.hh +++ b/src/libflake/flake/flake.hh @@ -57,7 +57,7 @@ struct FlakeInput * false = (fetched) static source path */ bool isFlake = true; - std::optional follows; + std::optional follows; 
FlakeInputs overrides; }; @@ -79,24 +79,37 @@ struct Flake * The original flake specification (by the user) */ FlakeRef originalRef; + /** * registry references and caching resolved to the specific underlying flake */ FlakeRef resolvedRef; + /** * the specific local store result of invoking the fetcher */ FlakeRef lockedRef; + /** * The path of `flake.nix`. */ SourcePath path; + /** - * pretend that 'lockedRef' is dirty + * Pretend that `lockedRef` is dirty. */ bool forceDirty = false; + std::optional description; + FlakeInputs inputs; + + /** + * Attributes to be retroactively applied to the `self` input + * (such as `submodules = true`). + */ + fetchers::Attrs selfAttrs; + /** * 'nixConfig' attribute */ @@ -201,13 +214,13 @@ struct LockFlags /** * Flake inputs to be overridden. */ - std::map inputOverrides; + std::map inputOverrides; /** * Flake inputs to be updated. This means that any existing lock * for those inputs will be ignored. */ - std::set inputUpdates; + std::set inputUpdates; }; LockedFlake lockFlake( @@ -221,16 +234,6 @@ void callFlake( const LockedFlake & lockedFlake, Value & v); -/** - * Map a `SourcePath` to the corresponding store path. This is a - * temporary hack to support chroot stores while we don't have full - * lazy trees. FIXME: Remove this once we can pass a sourcePath rather - * than a storePath to call-flake.nix. - */ -std::pair sourcePathToStorePath( - ref store, - const SourcePath & path); - } void emitTreeAttrs( diff --git a/src/libflake/flake/flakeref.cc b/src/libflake/flake/flakeref.cc index 720f771ab..4fc720eb5 100644 --- a/src/libflake/flake/flakeref.cc +++ b/src/libflake/flake/flakeref.cc @@ -107,7 +107,7 @@ std::pair parsePathFlakeRefWithFragment( to 'baseDir'). If so, search upward to the root of the repo (i.e. the directory containing .git). 
*/ - path = absPath(path, baseDir); + path = absPath(path, baseDir, true); if (isFlake) { @@ -283,10 +283,10 @@ FlakeRef FlakeRef::fromAttrs( fetchers::maybeGetStrAttr(attrs, "dir").value_or("")); } -std::pair FlakeRef::fetchTree(ref store) const +std::pair, FlakeRef> FlakeRef::lazyFetch(ref store) const { - auto [storePath, lockedInput] = input.fetchToStore(store); - return {std::move(storePath), FlakeRef(std::move(lockedInput), subdir)}; + auto [accessor, lockedInput] = input.getAccessor(store); + return {accessor, FlakeRef(std::move(lockedInput), subdir)}; } std::tuple parseFlakeRefWithFragmentAndExtendedOutputsSpec( diff --git a/src/libflake/flake/flakeref.hh b/src/libflake/flake/flakeref.hh index c9cf7952d..d3c15018e 100644 --- a/src/libflake/flake/flakeref.hh +++ b/src/libflake/flake/flakeref.hh @@ -71,7 +71,7 @@ struct FlakeRef const fetchers::Settings & fetchSettings, const fetchers::Attrs & attrs); - std::pair fetchTree(ref store) const; + std::pair, FlakeRef> lazyFetch(ref store) const; }; std::ostream & operator << (std::ostream & str, const FlakeRef & flakeRef); diff --git a/src/libflake/flake/lockfile.cc b/src/libflake/flake/lockfile.cc index 67af108b8..b0971a696 100644 --- a/src/libflake/flake/lockfile.cc +++ b/src/libflake/flake/lockfile.cc @@ -1,7 +1,10 @@ #include +#include "fetch-settings.hh" +#include "flake/settings.hh" #include "lockfile.hh" #include "store-api.hh" +#include "strings.hh" #include #include @@ -9,8 +12,6 @@ #include #include -#include "strings.hh" -#include "flake/settings.hh" namespace nix::flake { @@ -43,11 +44,18 @@ LockedNode::LockedNode( : lockedRef(getFlakeRef(fetchSettings, json, "locked", "info")) // FIXME: remove "info" , originalRef(getFlakeRef(fetchSettings, json, "original", nullptr)) , isFlake(json.find("flake") != json.end() ? (bool) json["flake"] : true) - , parentPath(json.find("parent") != json.end() ? (std::optional) json["parent"] : std::nullopt) + , parentInputAttrPath(json.find("parent") != json.end() ? 
(std::optional) json["parent"] : std::nullopt) { - if (!lockedRef.input.isConsideredLocked(fetchSettings) && !lockedRef.input.isRelative()) - throw Error("Lock file contains unlocked input '%s'. Use '--allow-dirty-locks' to accept this lock file.", - fetchers::attrsToJSON(lockedRef.input.toAttrs())); + if (!lockedRef.input.isLocked() && !lockedRef.input.isRelative()) { + if (lockedRef.input.getNarHash()) + warn( + "Lock file entry '%s' is unlocked (e.g. lacks a Git revision) but does have a NAR hash. " + "This is deprecated since such inputs are verifiable but may not be reproducible.", + lockedRef.to_string()); + else + throw Error("Lock file contains unlocked input '%s'. Use '--allow-dirty-locks' to accept this lock file.", + fetchers::attrsToJSON(lockedRef.input.toAttrs())); + } // For backward compatibility, lock file entries are implicitly final. assert(!lockedRef.input.attrs.contains("__final")); @@ -59,7 +67,7 @@ StorePath LockedNode::computeStorePath(Store & store) const return lockedRef.input.computeStorePath(store); } -static std::shared_ptr doFind(const ref & root, const InputPath & path, std::vector & visited) +static std::shared_ptr doFind(const ref & root, const InputAttrPath & path, std::vector & visited) { auto pos = root; @@ -67,8 +75,8 @@ static std::shared_ptr doFind(const ref & root, const InputPath & pa if (found != visited.end()) { std::vector cycle; - std::transform(found, visited.cend(), std::back_inserter(cycle), printInputPath); - cycle.push_back(printInputPath(path)); + std::transform(found, visited.cend(), std::back_inserter(cycle), printInputAttrPath); + cycle.push_back(printInputAttrPath(path)); throw Error("follow cycle detected: [%s]", concatStringsSep(" -> ", cycle)); } visited.push_back(path); @@ -90,9 +98,9 @@ static std::shared_ptr doFind(const ref & root, const InputPath & pa return pos; } -std::shared_ptr LockFile::findInput(const InputPath & path) +std::shared_ptr LockFile::findInput(const InputAttrPath & path) { - std::vector 
visited; + std::vector visited; return doFind(root, path, visited); } @@ -115,7 +123,7 @@ LockFile::LockFile( if (jsonNode.find("inputs") == jsonNode.end()) return; for (auto & i : jsonNode["inputs"].items()) { if (i.value().is_array()) { // FIXME: remove, obsolete - InputPath path; + InputAttrPath path; for (auto & j : i.value()) path.push_back(j); node.inputs.insert_or_assign(i.key(), path); @@ -203,8 +211,8 @@ std::pair LockFile::toJSON() const n["locked"].erase("__final"); if (!lockedNode->isFlake) n["flake"] = false; - if (lockedNode->parentPath) - n["parent"] = *lockedNode->parentPath; + if (lockedNode->parentInputAttrPath) + n["parent"] = *lockedNode->parentInputAttrPath; } nodes[key] = std::move(n); @@ -248,11 +256,20 @@ std::optional LockFile::isUnlocked(const fetchers::Settings & fetchSet visit(root); + /* Return whether the input is either locked, or, if + `allow-dirty-locks` is enabled, it has a NAR hash. In the + latter case, we can verify the input but we may not be able to + fetch it from anywhere. 
*/ + auto isConsideredLocked = [&](const fetchers::Input & input) + { + return input.isLocked() || (fetchSettings.allowDirtyLocks && input.getNarHash()); + }; + for (auto & i : nodes) { if (i == ref(root)) continue; auto node = i.dynamic_pointer_cast(); if (node - && (!node->lockedRef.input.isConsideredLocked(fetchSettings) + && (!isConsideredLocked(node->lockedRef.input) || !node->lockedRef.input.isFinal()) && !node->lockedRef.input.isRelative()) return node->lockedRef; @@ -267,36 +284,36 @@ bool LockFile::operator ==(const LockFile & other) const return toJSON().first == other.toJSON().first; } -InputPath parseInputPath(std::string_view s) +InputAttrPath parseInputAttrPath(std::string_view s) { - InputPath path; + InputAttrPath path; for (auto & elem : tokenizeString>(s, "/")) { if (!std::regex_match(elem, flakeIdRegex)) - throw UsageError("invalid flake input path element '%s'", elem); + throw UsageError("invalid flake input attribute path element '%s'", elem); path.push_back(elem); } return path; } -std::map LockFile::getAllInputs() const +std::map LockFile::getAllInputs() const { std::set> done; - std::map res; + std::map res; - std::function node)> recurse; + std::function node)> recurse; - recurse = [&](const InputPath & prefix, ref node) + recurse = [&](const InputAttrPath & prefix, ref node) { if (!done.insert(node).second) return; for (auto &[id, input] : node->inputs) { - auto inputPath(prefix); - inputPath.push_back(id); - res.emplace(inputPath, input); + auto inputAttrPath(prefix); + inputAttrPath.push_back(id); + res.emplace(inputAttrPath, input); if (auto child = std::get_if<0>(&input)) - recurse(inputPath, *child); + recurse(inputAttrPath, *child); } }; @@ -320,7 +337,7 @@ std::ostream & operator <<(std::ostream & stream, const Node::Edge & edge) if (auto node = std::get_if<0>(&edge)) stream << describe((*node)->lockedRef); else if (auto follows = std::get_if<1>(&edge)) - stream << fmt("follows '%s'", printInputPath(*follows)); + stream << 
fmt("follows '%s'", printInputAttrPath(*follows)); return stream; } @@ -347,15 +364,15 @@ std::string LockFile::diff(const LockFile & oldLocks, const LockFile & newLocks) while (i != oldFlat.end() || j != newFlat.end()) { if (j != newFlat.end() && (i == oldFlat.end() || i->first > j->first)) { res += fmt("• " ANSI_GREEN "Added input '%s':" ANSI_NORMAL "\n %s\n", - printInputPath(j->first), j->second); + printInputAttrPath(j->first), j->second); ++j; } else if (i != oldFlat.end() && (j == newFlat.end() || i->first < j->first)) { - res += fmt("• " ANSI_RED "Removed input '%s'" ANSI_NORMAL "\n", printInputPath(i->first)); + res += fmt("• " ANSI_RED "Removed input '%s'" ANSI_NORMAL "\n", printInputAttrPath(i->first)); ++i; } else { if (!equals(i->second, j->second)) { res += fmt("• " ANSI_BOLD "Updated input '%s':" ANSI_NORMAL "\n %s\n → %s\n", - printInputPath(i->first), + printInputAttrPath(i->first), i->second, j->second); } @@ -371,19 +388,19 @@ void LockFile::check() { auto inputs = getAllInputs(); - for (auto & [inputPath, input] : inputs) { + for (auto & [inputAttrPath, input] : inputs) { if (auto follows = std::get_if<1>(&input)) { if (!follows->empty() && !findInput(*follows)) throw Error("input '%s' follows a non-existent input '%s'", - printInputPath(inputPath), - printInputPath(*follows)); + printInputAttrPath(inputAttrPath), + printInputAttrPath(*follows)); } } } void check(); -std::string printInputPath(const InputPath & path) +std::string printInputAttrPath(const InputAttrPath & path) { return concatStringsSep("/", path); } diff --git a/src/libflake/flake/lockfile.hh b/src/libflake/flake/lockfile.hh index cb7c8da5a..cbc6d01eb 100644 --- a/src/libflake/flake/lockfile.hh +++ b/src/libflake/flake/lockfile.hh @@ -12,7 +12,7 @@ class StorePath; namespace nix::flake { -typedef std::vector InputPath; +typedef std::vector InputAttrPath; struct LockedNode; @@ -23,7 +23,7 @@ struct LockedNode; */ struct Node : std::enable_shared_from_this { - typedef std::variant, 
InputPath> Edge; + typedef std::variant, InputAttrPath> Edge; std::map inputs; @@ -40,17 +40,17 @@ struct LockedNode : Node /* The node relative to which relative source paths (e.g. 'path:../foo') are interpreted. */ - std::optional parentPath; + std::optional parentInputAttrPath; LockedNode( const FlakeRef & lockedRef, const FlakeRef & originalRef, bool isFlake = true, - std::optional parentPath = {}) - : lockedRef(lockedRef) - , originalRef(originalRef) + std::optional parentInputAttrPath = {}) + : lockedRef(std::move(lockedRef)) + , originalRef(std::move(originalRef)) , isFlake(isFlake) - , parentPath(parentPath) + , parentInputAttrPath(std::move(parentInputAttrPath)) { } LockedNode( @@ -83,9 +83,9 @@ struct LockFile bool operator ==(const LockFile & other) const; - std::shared_ptr findInput(const InputPath & path); + std::shared_ptr findInput(const InputAttrPath & path); - std::map getAllInputs() const; + std::map getAllInputs() const; static std::string diff(const LockFile & oldLocks, const LockFile & newLocks); @@ -97,8 +97,8 @@ struct LockFile std::ostream & operator <<(std::ostream & stream, const LockFile & lockFile); -InputPath parseInputPath(std::string_view s); +InputAttrPath parseInputAttrPath(std::string_view s); -std::string printInputPath(const InputPath & path); +std::string printInputAttrPath(const InputAttrPath & path); } diff --git a/src/libmain/loggers.cc b/src/libmain/loggers.cc index a4e0530c8..07d83a960 100644 --- a/src/libmain/loggers.cc +++ b/src/libmain/loggers.cc @@ -6,7 +6,8 @@ namespace nix { LogFormat defaultLogFormat = LogFormat::raw; -LogFormat parseLogFormat(const std::string & logFormatStr) { +LogFormat parseLogFormat(const std::string & logFormatStr) +{ if (logFormatStr == "raw" || getEnv("NIX_GET_COMPLETIONS")) return LogFormat::raw; else if (logFormatStr == "raw-with-logs") @@ -20,14 +21,15 @@ LogFormat parseLogFormat(const std::string & logFormatStr) { throw Error("option 'log-format' has an invalid value '%s'", logFormatStr); 
} -Logger * makeDefaultLogger() { +std::unique_ptr makeDefaultLogger() +{ switch (defaultLogFormat) { case LogFormat::raw: return makeSimpleLogger(false); case LogFormat::rawWithLogs: return makeSimpleLogger(true); case LogFormat::internalJSON: - return makeJSONLogger(*makeSimpleLogger(true)); + return makeJSONLogger(getStandardError()); case LogFormat::bar: return makeProgressBar(); case LogFormat::barWithLogs: { @@ -40,16 +42,14 @@ Logger * makeDefaultLogger() { } } -void setLogFormat(const std::string & logFormatStr) { +void setLogFormat(const std::string & logFormatStr) +{ setLogFormat(parseLogFormat(logFormatStr)); } -void setLogFormat(const LogFormat & logFormat) { +void setLogFormat(const LogFormat & logFormat) +{ defaultLogFormat = logFormat; - createDefaultLogger(); -} - -void createDefaultLogger() { logger = makeDefaultLogger(); } diff --git a/src/libmain/loggers.hh b/src/libmain/loggers.hh index e5721420c..98b287fa7 100644 --- a/src/libmain/loggers.hh +++ b/src/libmain/loggers.hh @@ -16,6 +16,4 @@ enum class LogFormat { void setLogFormat(const std::string & logFormatStr); void setLogFormat(const LogFormat & logFormat); -void createDefaultLogger(); - } diff --git a/src/libmain/progress-bar.cc b/src/libmain/progress-bar.cc index 961850b58..2d4d901db 100644 --- a/src/libmain/progress-bar.cc +++ b/src/libmain/progress-bar.cc @@ -73,8 +73,13 @@ private: uint64_t corruptedPaths = 0, untrustedPaths = 0; bool active = true; - bool paused = false; + size_t suspensions = 0; bool haveUpdate = true; + + bool isPaused() const + { + return suspensions > 0; + } }; /** Helps avoid unnecessary redraws, see `redraw()` */ @@ -117,29 +122,43 @@ public: { { auto state(state_.lock()); - if (!state->active) return; - state->active = false; - writeToStderr("\r\e[K"); - updateCV.notify_one(); - quitCV.notify_one(); + if (state->active) { + state->active = false; + writeToStderr("\r\e[K"); + updateCV.notify_one(); + quitCV.notify_one(); + } } - updateThread.join(); + if 
(updateThread.joinable()) + updateThread.join(); } void pause() override { auto state (state_.lock()); - state->paused = true; + state->suspensions++; + if (state->suspensions > 1) { + // already paused + return; + } + if (state->active) writeToStderr("\r\e[K"); } void resume() override { auto state (state_.lock()); - state->paused = false; - if (state->active) - writeToStderr("\r\e[K"); - state->haveUpdate = true; - updateCV.notify_one(); + if (state->suspensions == 0) { + log(lvlError, "nix::ProgressBar: resume() called without a matching preceding pause(). This is a bug."); + return; + } else { + state->suspensions--; + } + if (state->suspensions == 0) { + if (state->active) + writeToStderr("\r\e[K"); + state->haveUpdate = true; + updateCV.notify_one(); + } } bool isVerbose() override @@ -381,7 +400,7 @@ public: auto nextWakeup = std::chrono::milliseconds::max(); state.haveUpdate = false; - if (state.paused || !state.active) return nextWakeup; + if (state.isPaused() || !state.active) return nextWakeup; std::string line; @@ -553,21 +572,9 @@ public: } }; -Logger * makeProgressBar() +std::unique_ptr makeProgressBar() { - return new ProgressBar(isTTY()); -} - -void startProgressBar() -{ - logger = makeProgressBar(); -} - -void stopProgressBar() -{ - auto progressBar = dynamic_cast(logger); - if (progressBar) progressBar->stop(); - + return std::make_unique(isTTY()); } } diff --git a/src/libmain/progress-bar.hh b/src/libmain/progress-bar.hh index c3c6e3833..fc1b0fe78 100644 --- a/src/libmain/progress-bar.hh +++ b/src/libmain/progress-bar.hh @@ -5,10 +5,6 @@ namespace nix { -Logger * makeProgressBar(); - -void startProgressBar(); - -void stopProgressBar(); +std::unique_ptr makeProgressBar(); } diff --git a/src/libmain/shared.cc b/src/libmain/shared.cc index f61754bf1..28869c4db 100644 --- a/src/libmain/shared.cc +++ b/src/libmain/shared.cc @@ -361,7 +361,7 @@ RunPager::RunPager() if (!pager) pager = getenv("PAGER"); if (pager && ((std::string) pager == "" || 
(std::string) pager == "cat")) return; - stopProgressBar(); + logger->stop(); Pipe toPager; toPager.create(); diff --git a/src/libstore-tests/derivation-advanced-attrs.cc b/src/libstore-tests/derivation-advanced-attrs.cc index 9d2c64ef3..107cf13e3 100644 --- a/src/libstore-tests/derivation-advanced-attrs.cc +++ b/src/libstore-tests/derivation-advanced-attrs.cc @@ -3,13 +3,15 @@ #include "experimental-features.hh" #include "derivations.hh" - -#include "tests/libstore.hh" -#include "tests/characterization.hh" +#include "derivations.hh" +#include "derivation-options.hh" #include "parsed-derivations.hh" #include "types.hh" #include "json-utils.hh" +#include "tests/libstore.hh" +#include "tests/characterization.hh" + namespace nix { using nlohmann::json; @@ -80,21 +82,30 @@ TEST_F(DerivationAdvancedAttrsTest, Derivation_advancedAttributes_defaults) auto drvPath = writeDerivation(*store, got, NoRepair, true); ParsedDerivation parsedDrv(drvPath, got); + DerivationOptions options = DerivationOptions::fromParsedDerivation(parsedDrv); - EXPECT_EQ(parsedDrv.getStringAttr("__sandboxProfile").value_or(""), ""); - EXPECT_EQ(parsedDrv.getBoolAttr("__noChroot"), false); - EXPECT_EQ(parsedDrv.getStringsAttr("__impureHostDeps").value_or(Strings()), Strings()); - EXPECT_EQ(parsedDrv.getStringsAttr("impureEnvVars").value_or(Strings()), Strings()); - EXPECT_EQ(parsedDrv.getBoolAttr("__darwinAllowLocalNetworking"), false); - EXPECT_EQ(parsedDrv.getStringsAttr("allowedReferences"), std::nullopt); - EXPECT_EQ(parsedDrv.getStringsAttr("allowedRequisites"), std::nullopt); - EXPECT_EQ(parsedDrv.getStringsAttr("disallowedReferences"), std::nullopt); - EXPECT_EQ(parsedDrv.getStringsAttr("disallowedRequisites"), std::nullopt); - EXPECT_EQ(parsedDrv.getRequiredSystemFeatures(), StringSet()); - EXPECT_EQ(parsedDrv.canBuildLocally(*store), false); - EXPECT_EQ(parsedDrv.willBuildLocally(*store), false); - EXPECT_EQ(parsedDrv.substitutesAllowed(), true); - EXPECT_EQ(parsedDrv.useUidRange(), false); 
+ EXPECT_TRUE(!parsedDrv.hasStructuredAttrs()); + + EXPECT_EQ(options.additionalSandboxProfile, ""); + EXPECT_EQ(options.noChroot, false); + EXPECT_EQ(options.impureHostDeps, StringSet{}); + EXPECT_EQ(options.impureEnvVars, StringSet{}); + EXPECT_EQ(options.allowLocalNetworking, false); + { + auto * checksForAllOutputs_ = std::get_if<0>(&options.outputChecks); + ASSERT_TRUE(checksForAllOutputs_ != nullptr); + auto & checksForAllOutputs = *checksForAllOutputs_; + + EXPECT_EQ(checksForAllOutputs.allowedReferences, std::nullopt); + EXPECT_EQ(checksForAllOutputs.allowedRequisites, std::nullopt); + EXPECT_EQ(checksForAllOutputs.disallowedReferences, StringSet{}); + EXPECT_EQ(checksForAllOutputs.disallowedRequisites, StringSet{}); + } + EXPECT_EQ(options.getRequiredSystemFeatures(got), StringSet()); + EXPECT_EQ(options.canBuildLocally(*store, got), false); + EXPECT_EQ(options.willBuildLocally(*store, got), false); + EXPECT_EQ(options.substitutesAllowed(), true); + EXPECT_EQ(options.useUidRange(got), false); }); }; @@ -106,29 +117,36 @@ TEST_F(DerivationAdvancedAttrsTest, Derivation_advancedAttributes) auto drvPath = writeDerivation(*store, got, NoRepair, true); ParsedDerivation parsedDrv(drvPath, got); + DerivationOptions options = DerivationOptions::fromParsedDerivation(parsedDrv); StringSet systemFeatures{"rainbow", "uid-range"}; - EXPECT_EQ(parsedDrv.getStringAttr("__sandboxProfile").value_or(""), "sandcastle"); - EXPECT_EQ(parsedDrv.getBoolAttr("__noChroot"), true); - EXPECT_EQ(parsedDrv.getStringsAttr("__impureHostDeps").value_or(Strings()), Strings{"/usr/bin/ditto"}); - EXPECT_EQ(parsedDrv.getStringsAttr("impureEnvVars").value_or(Strings()), Strings{"UNICORN"}); - EXPECT_EQ(parsedDrv.getBoolAttr("__darwinAllowLocalNetworking"), true); - EXPECT_EQ( - parsedDrv.getStringsAttr("allowedReferences"), Strings{"/nix/store/3c08bzb71z4wiag719ipjxr277653ynp-foo"}); - EXPECT_EQ( - parsedDrv.getStringsAttr("allowedRequisites"), 
Strings{"/nix/store/3c08bzb71z4wiag719ipjxr277653ynp-foo"}); - EXPECT_EQ( - parsedDrv.getStringsAttr("disallowedReferences"), - Strings{"/nix/store/7rhsm8i393hm1wcsmph782awg1hi2f7x-bar"}); - EXPECT_EQ( - parsedDrv.getStringsAttr("disallowedRequisites"), - Strings{"/nix/store/7rhsm8i393hm1wcsmph782awg1hi2f7x-bar"}); - EXPECT_EQ(parsedDrv.getRequiredSystemFeatures(), systemFeatures); - EXPECT_EQ(parsedDrv.canBuildLocally(*store), false); - EXPECT_EQ(parsedDrv.willBuildLocally(*store), false); - EXPECT_EQ(parsedDrv.substitutesAllowed(), false); - EXPECT_EQ(parsedDrv.useUidRange(), true); + EXPECT_TRUE(!parsedDrv.hasStructuredAttrs()); + + EXPECT_EQ(options.additionalSandboxProfile, "sandcastle"); + EXPECT_EQ(options.noChroot, true); + EXPECT_EQ(options.impureHostDeps, StringSet{"/usr/bin/ditto"}); + EXPECT_EQ(options.impureEnvVars, StringSet{"UNICORN"}); + EXPECT_EQ(options.allowLocalNetworking, true); + { + auto * checksForAllOutputs_ = std::get_if<0>(&options.outputChecks); + ASSERT_TRUE(checksForAllOutputs_ != nullptr); + auto & checksForAllOutputs = *checksForAllOutputs_; + + EXPECT_EQ( + checksForAllOutputs.allowedReferences, StringSet{"/nix/store/3c08bzb71z4wiag719ipjxr277653ynp-foo"}); + EXPECT_EQ( + checksForAllOutputs.allowedRequisites, StringSet{"/nix/store/3c08bzb71z4wiag719ipjxr277653ynp-foo"}); + EXPECT_EQ( + checksForAllOutputs.disallowedReferences, StringSet{"/nix/store/7rhsm8i393hm1wcsmph782awg1hi2f7x-bar"}); + EXPECT_EQ( + checksForAllOutputs.disallowedRequisites, StringSet{"/nix/store/7rhsm8i393hm1wcsmph782awg1hi2f7x-bar"}); + } + EXPECT_EQ(options.getRequiredSystemFeatures(got), systemFeatures); + EXPECT_EQ(options.canBuildLocally(*store, got), false); + EXPECT_EQ(options.willBuildLocally(*store, got), false); + EXPECT_EQ(options.substitutesAllowed(), false); + EXPECT_EQ(options.useUidRange(got), true); }); }; @@ -140,27 +158,29 @@ TEST_F(DerivationAdvancedAttrsTest, Derivation_advancedAttributes_structuredAttr auto drvPath = writeDerivation(*store, 
got, NoRepair, true); ParsedDerivation parsedDrv(drvPath, got); + DerivationOptions options = DerivationOptions::fromParsedDerivation(parsedDrv); - EXPECT_EQ(parsedDrv.getStringAttr("__sandboxProfile").value_or(""), ""); - EXPECT_EQ(parsedDrv.getBoolAttr("__noChroot"), false); - EXPECT_EQ(parsedDrv.getStringsAttr("__impureHostDeps").value_or(Strings()), Strings()); - EXPECT_EQ(parsedDrv.getStringsAttr("impureEnvVars").value_or(Strings()), Strings()); - EXPECT_EQ(parsedDrv.getBoolAttr("__darwinAllowLocalNetworking"), false); + EXPECT_TRUE(parsedDrv.hasStructuredAttrs()); + + EXPECT_EQ(options.additionalSandboxProfile, ""); + EXPECT_EQ(options.noChroot, false); + EXPECT_EQ(options.impureHostDeps, StringSet{}); + EXPECT_EQ(options.impureEnvVars, StringSet{}); + EXPECT_EQ(options.allowLocalNetworking, false); { - auto structuredAttrs_ = parsedDrv.getStructuredAttrs(); - ASSERT_TRUE(structuredAttrs_); - auto & structuredAttrs = *structuredAttrs_; + auto * checksPerOutput_ = std::get_if<1>(&options.outputChecks); + ASSERT_TRUE(checksPerOutput_ != nullptr); + auto & checksPerOutput = *checksPerOutput_; - auto outputChecks_ = get(structuredAttrs, "outputChecks"); - ASSERT_FALSE(outputChecks_); + EXPECT_EQ(checksPerOutput.size(), 0); } - EXPECT_EQ(parsedDrv.getRequiredSystemFeatures(), StringSet()); - EXPECT_EQ(parsedDrv.canBuildLocally(*store), false); - EXPECT_EQ(parsedDrv.willBuildLocally(*store), false); - EXPECT_EQ(parsedDrv.substitutesAllowed(), true); - EXPECT_EQ(parsedDrv.useUidRange(), false); + EXPECT_EQ(options.getRequiredSystemFeatures(got), StringSet()); + EXPECT_EQ(options.canBuildLocally(*store, got), false); + EXPECT_EQ(options.willBuildLocally(*store, got), false); + EXPECT_EQ(options.substitutesAllowed(), true); + EXPECT_EQ(options.useUidRange(got), false); }); }; @@ -172,62 +192,52 @@ TEST_F(DerivationAdvancedAttrsTest, Derivation_advancedAttributes_structuredAttr auto drvPath = writeDerivation(*store, got, NoRepair, true); ParsedDerivation 
parsedDrv(drvPath, got); + DerivationOptions options = DerivationOptions::fromParsedDerivation(parsedDrv); StringSet systemFeatures{"rainbow", "uid-range"}; - EXPECT_EQ(parsedDrv.getStringAttr("__sandboxProfile").value_or(""), "sandcastle"); - EXPECT_EQ(parsedDrv.getBoolAttr("__noChroot"), true); - EXPECT_EQ(parsedDrv.getStringsAttr("__impureHostDeps").value_or(Strings()), Strings{"/usr/bin/ditto"}); - EXPECT_EQ(parsedDrv.getStringsAttr("impureEnvVars").value_or(Strings()), Strings{"UNICORN"}); - EXPECT_EQ(parsedDrv.getBoolAttr("__darwinAllowLocalNetworking"), true); + EXPECT_TRUE(parsedDrv.hasStructuredAttrs()); + + EXPECT_EQ(options.additionalSandboxProfile, "sandcastle"); + EXPECT_EQ(options.noChroot, true); + EXPECT_EQ(options.impureHostDeps, StringSet{"/usr/bin/ditto"}); + EXPECT_EQ(options.impureEnvVars, StringSet{"UNICORN"}); + EXPECT_EQ(options.allowLocalNetworking, true); { - auto structuredAttrs_ = parsedDrv.getStructuredAttrs(); - ASSERT_TRUE(structuredAttrs_); - auto & structuredAttrs = *structuredAttrs_; - - auto outputChecks_ = get(structuredAttrs, "outputChecks"); - ASSERT_TRUE(outputChecks_); - auto & outputChecks = *outputChecks_; - { - auto output_ = get(outputChecks, "out"); + auto output_ = get(std::get<1>(options.outputChecks), "out"); ASSERT_TRUE(output_); auto & output = *output_; - EXPECT_EQ( - get(output, "allowedReferences")->get(), - Strings{"/nix/store/3c08bzb71z4wiag719ipjxr277653ynp-foo"}); - EXPECT_EQ( - get(output, "allowedRequisites")->get(), - Strings{"/nix/store/3c08bzb71z4wiag719ipjxr277653ynp-foo"}); + + EXPECT_EQ(output.allowedReferences, StringSet{"/nix/store/3c08bzb71z4wiag719ipjxr277653ynp-foo"}); + EXPECT_EQ(output.allowedRequisites, StringSet{"/nix/store/3c08bzb71z4wiag719ipjxr277653ynp-foo"}); } { - auto output_ = get(outputChecks, "bin"); + auto output_ = get(std::get<1>(options.outputChecks), "bin"); ASSERT_TRUE(output_); auto & output = *output_; - EXPECT_EQ( - get(output, "disallowedReferences")->get(), - 
Strings{"/nix/store/7rhsm8i393hm1wcsmph782awg1hi2f7x-bar"}); - EXPECT_EQ( - get(output, "disallowedRequisites")->get(), - Strings{"/nix/store/7rhsm8i393hm1wcsmph782awg1hi2f7x-bar"}); + + EXPECT_EQ(output.disallowedReferences, StringSet{"/nix/store/7rhsm8i393hm1wcsmph782awg1hi2f7x-bar"}); + EXPECT_EQ(output.disallowedRequisites, StringSet{"/nix/store/7rhsm8i393hm1wcsmph782awg1hi2f7x-bar"}); } { - auto output_ = get(outputChecks, "dev"); + auto output_ = get(std::get<1>(options.outputChecks), "dev"); ASSERT_TRUE(output_); auto & output = *output_; - EXPECT_EQ(get(output, "maxSize")->get(), 789); - EXPECT_EQ(get(output, "maxClosureSize")->get(), 5909); + + EXPECT_EQ(output.maxSize, 789); + EXPECT_EQ(output.maxClosureSize, 5909); } } - EXPECT_EQ(parsedDrv.getRequiredSystemFeatures(), systemFeatures); - EXPECT_EQ(parsedDrv.canBuildLocally(*store), false); - EXPECT_EQ(parsedDrv.willBuildLocally(*store), false); - EXPECT_EQ(parsedDrv.substitutesAllowed(), false); - EXPECT_EQ(parsedDrv.useUidRange(), true); + EXPECT_EQ(options.getRequiredSystemFeatures(got), systemFeatures); + EXPECT_EQ(options.canBuildLocally(*store, got), false); + EXPECT_EQ(options.willBuildLocally(*store, got), false); + EXPECT_EQ(options.substitutesAllowed(), false); + EXPECT_EQ(options.useUidRange(got), true); }); }; diff --git a/src/libstore/build/derivation-goal.cc b/src/libstore/build/derivation-goal.cc index 714dc87c8..f7d306bf7 100644 --- a/src/libstore/build/derivation-goal.cc +++ b/src/libstore/build/derivation-goal.cc @@ -36,14 +36,6 @@ namespace nix { -Goal::Co DerivationGoal::init() { - if (useDerivation) { - co_return getDerivation(); - } else { - co_return haveDerivation(); - } -} - DerivationGoal::DerivationGoal(const StorePath & drvPath, const OutputsSpec & wantedOutputs, Worker & worker, BuildMode buildMode) : Goal(worker, DerivedPath::Built { .drvPath = makeConstantStorePathRef(drvPath), .outputs = wantedOutputs }) @@ -141,50 +133,44 @@ void DerivationGoal::addWantedOutputs(const 
OutputsSpec & outputs) } -Goal::Co DerivationGoal::getDerivation() -{ +Goal::Co DerivationGoal::init() { trace("init"); - /* The first thing to do is to make sure that the derivation - exists. If it doesn't, it may be created through a - substitute. */ - if (buildMode == bmNormal && worker.evalStore.isValidPath(drvPath)) { - co_return loadDerivation(); - } + if (useDerivation) { + /* The first thing to do is to make sure that the derivation + exists. If it doesn't, it may be created through a + substitute. */ - addWaitee(upcast_goal(worker.makePathSubstitutionGoal(drvPath))); - - co_await Suspend{}; - co_return loadDerivation(); -} - - -Goal::Co DerivationGoal::loadDerivation() -{ - trace("loading derivation"); - - if (nrFailed != 0) { - co_return done(BuildResult::MiscFailure, {}, Error("cannot build missing derivation '%s'", worker.store.printStorePath(drvPath))); - } - - /* `drvPath' should already be a root, but let's be on the safe - side: if the user forgot to make it a root, we wouldn't want - things being garbage collected while we're busy. */ - worker.evalStore.addTempRoot(drvPath); - - /* Get the derivation. It is probably in the eval store, but it might be inthe main store: - - - Resolved derivation are resolved against main store realisations, and so must be stored there. - - - Dynamic derivations are built, and so are found in the main store. 
- */ - for (auto * drvStore : { &worker.evalStore, &worker.store }) { - if (drvStore->isValidPath(drvPath)) { - drv = std::make_unique(drvStore->readDerivation(drvPath)); - break; + if (buildMode != bmNormal || !worker.evalStore.isValidPath(drvPath)) { + addWaitee(upcast_goal(worker.makePathSubstitutionGoal(drvPath))); + co_await Suspend{}; } + + trace("loading derivation"); + + if (nrFailed != 0) { + co_return done(BuildResult::MiscFailure, {}, Error("cannot build missing derivation '%s'", worker.store.printStorePath(drvPath))); + } + + /* `drvPath' should already be a root, but let's be on the safe + side: if the user forgot to make it a root, we wouldn't want + things being garbage collected while we're busy. */ + worker.evalStore.addTempRoot(drvPath); + + /* Get the derivation. It is probably in the eval store, but it might be inthe main store: + + - Resolved derivation are resolved against main store realisations, and so must be stored there. + + - Dynamic derivations are built, and so are found in the main store. 
+ */ + for (auto * drvStore : { &worker.evalStore, &worker.store }) { + if (drvStore->isValidPath(drvPath)) { + drv = std::make_unique(drvStore->readDerivation(drvPath)); + break; + } + } + assert(drv); } - assert(drv); co_return haveDerivation(); } @@ -195,58 +181,64 @@ Goal::Co DerivationGoal::haveDerivation() trace("have derivation"); parsedDrv = std::make_unique(drvPath, *drv); + drvOptions = std::make_unique(DerivationOptions::fromParsedDerivation(*parsedDrv)); if (!drv->type().hasKnownOutputPaths()) experimentalFeatureSettings.require(Xp::CaDerivations); - if (drv->type().isImpure()) { - experimentalFeatureSettings.require(Xp::ImpureDerivations); - - for (auto & [outputName, output] : drv->outputs) { - auto randomPath = StorePath::random(outputPathName(drv->name, outputName)); - assert(!worker.store.isValidPath(randomPath)); - initialOutputs.insert({ - outputName, - InitialOutput { - .wanted = true, - .outputHash = impureOutputHash, - .known = InitialOutputStatus { - .path = randomPath, - .status = PathStatus::Absent - } - } - }); - } - - co_return gaveUpOnSubstitution(); - } - for (auto & i : drv->outputsAndOptPaths(worker.store)) if (i.second.second) worker.store.addTempRoot(*i.second.second); - auto outputHashes = staticOutputHashes(worker.evalStore, *drv); - for (auto & [outputName, outputHash] : outputHashes) - initialOutputs.insert({ - outputName, - InitialOutput { + { + bool impure = drv->type().isImpure(); + + if (impure) experimentalFeatureSettings.require(Xp::ImpureDerivations); + + auto outputHashes = staticOutputHashes(worker.evalStore, *drv); + for (auto & [outputName, outputHash] : outputHashes) { + InitialOutput v{ .wanted = true, // Will be refined later .outputHash = outputHash + }; + + /* TODO we might want to also allow randomizing the paths + for regular CA derivations, e.g. for sake of checking + determinism. 
*/ + if (impure) { + v.known = InitialOutputStatus { + .path = StorePath::random(outputPathName(drv->name, outputName)), + .status = PathStatus::Absent, + }; } - }); - /* Check what outputs paths are not already valid. */ - auto [allValid, validOutputs] = checkPathValidity(); + initialOutputs.insert({ + outputName, + std::move(v), + }); + } - /* If they are all valid, then we're done. */ - if (allValid && buildMode == bmNormal) { - co_return done(BuildResult::AlreadyValid, std::move(validOutputs)); + if (impure) { + /* We don't yet have any safe way to cache an impure derivation at + this step. */ + co_return gaveUpOnSubstitution(); + } + } + + { + /* Check what outputs paths are not already valid. */ + auto [allValid, validOutputs] = checkPathValidity(); + + /* If they are all valid, then we're done. */ + if (allValid && buildMode == bmNormal) { + co_return done(BuildResult::AlreadyValid, std::move(validOutputs)); + } } /* We are first going to try to create the invalid output paths through substitutes. If that doesn't work, we'll build them. */ - if (settings.useSubstitutes && parsedDrv->substitutesAllowed()) + if (settings.useSubstitutes && drvOptions->substitutesAllowed()) for (auto & [outputName, status] : initialOutputs) { if (!status.wanted) continue; if (!status.known) @@ -268,12 +260,7 @@ Goal::Co DerivationGoal::haveDerivation() } if (!waitees.empty()) co_await Suspend{}; /* to prevent hang (no wake-up event) */ - co_return outputsSubstitutionTried(); -} - -Goal::Co DerivationGoal::outputsSubstitutionTried() -{ trace("all outputs substituted (maybe)"); assert(!drv->type().isImpure()); @@ -399,84 +386,7 @@ Goal::Co DerivationGoal::gaveUpOnSubstitution() } if (!waitees.empty()) co_await Suspend{}; /* to prevent hang (no wake-up event) */ - co_return inputsRealised(); -} - -Goal::Co DerivationGoal::repairClosure() -{ - assert(!drv->type().isImpure()); - - /* If we're repairing, we now know that our own outputs are valid. 
- Now check whether the other paths in the outputs closure are - good. If not, then start derivation goals for the derivations - that produced those outputs. */ - - /* Get the output closure. */ - auto outputs = queryDerivationOutputMap(); - StorePathSet outputClosure; - for (auto & i : outputs) { - if (!wantedOutputs.contains(i.first)) continue; - worker.store.computeFSClosure(i.second, outputClosure); - } - - /* Filter out our own outputs (which we have already checked). */ - for (auto & i : outputs) - outputClosure.erase(i.second); - - /* Get all dependencies of this derivation so that we know which - derivation is responsible for which path in the output - closure. */ - StorePathSet inputClosure; - if (useDerivation) worker.store.computeFSClosure(drvPath, inputClosure); - std::map outputsToDrv; - for (auto & i : inputClosure) - if (i.isDerivation()) { - auto depOutputs = worker.store.queryPartialDerivationOutputMap(i, &worker.evalStore); - for (auto & j : depOutputs) - if (j.second) - outputsToDrv.insert_or_assign(*j.second, i); - } - - /* Check each path (slow!). 
*/ - for (auto & i : outputClosure) { - if (worker.pathContentsGood(i)) continue; - printError( - "found corrupted or missing path '%s' in the output closure of '%s'", - worker.store.printStorePath(i), worker.store.printStorePath(drvPath)); - auto drvPath2 = outputsToDrv.find(i); - if (drvPath2 == outputsToDrv.end()) - addWaitee(upcast_goal(worker.makePathSubstitutionGoal(i, Repair))); - else - addWaitee(worker.makeGoal( - DerivedPath::Built { - .drvPath = makeConstantStorePathRef(drvPath2->second), - .outputs = OutputsSpec::All { }, - }, - bmRepair)); - } - - if (waitees.empty()) { - co_return done(BuildResult::AlreadyValid, assertPathValidity()); - } else { - co_await Suspend{}; - co_return closureRepaired(); - } -} - - -Goal::Co DerivationGoal::closureRepaired() -{ - trace("closure repaired"); - if (nrFailed > 0) - throw Error("some paths in the output closure of derivation '%s' could not be repaired", - worker.store.printStorePath(drvPath)); - co_return done(BuildResult::AlreadyValid, assertPathValidity()); -} - - -Goal::Co DerivationGoal::inputsRealised() -{ trace("all inputs realised"); if (nrFailed != 0) { @@ -718,7 +628,7 @@ Goal::Co DerivationGoal::tryToBuild() `preferLocalBuild' set. Also, check and repair modes are only supported for local builds. */ bool buildLocally = - (buildMode != bmNormal || parsedDrv->willBuildLocally(worker.store)) + (buildMode != bmNormal || drvOptions->willBuildLocally(worker.store, *drv)) && settings.maxBuildJobs.get() != 0; if (!buildLocally) { @@ -766,6 +676,73 @@ Goal::Co DerivationGoal::tryLocalBuild() { } +Goal::Co DerivationGoal::repairClosure() +{ + assert(!drv->type().isImpure()); + + /* If we're repairing, we now know that our own outputs are valid. + Now check whether the other paths in the outputs closure are + good. If not, then start derivation goals for the derivations + that produced those outputs. */ + + /* Get the output closure. 
*/ + auto outputs = queryDerivationOutputMap(); + StorePathSet outputClosure; + for (auto & i : outputs) { + if (!wantedOutputs.contains(i.first)) continue; + worker.store.computeFSClosure(i.second, outputClosure); + } + + /* Filter out our own outputs (which we have already checked). */ + for (auto & i : outputs) + outputClosure.erase(i.second); + + /* Get all dependencies of this derivation so that we know which + derivation is responsible for which path in the output + closure. */ + StorePathSet inputClosure; + if (useDerivation) worker.store.computeFSClosure(drvPath, inputClosure); + std::map outputsToDrv; + for (auto & i : inputClosure) + if (i.isDerivation()) { + auto depOutputs = worker.store.queryPartialDerivationOutputMap(i, &worker.evalStore); + for (auto & j : depOutputs) + if (j.second) + outputsToDrv.insert_or_assign(*j.second, i); + } + + /* Check each path (slow!). */ + for (auto & i : outputClosure) { + if (worker.pathContentsGood(i)) continue; + printError( + "found corrupted or missing path '%s' in the output closure of '%s'", + worker.store.printStorePath(i), worker.store.printStorePath(drvPath)); + auto drvPath2 = outputsToDrv.find(i); + if (drvPath2 == outputsToDrv.end()) + addWaitee(upcast_goal(worker.makePathSubstitutionGoal(i, Repair))); + else + addWaitee(worker.makeGoal( + DerivedPath::Built { + .drvPath = makeConstantStorePathRef(drvPath2->second), + .outputs = OutputsSpec::All { }, + }, + bmRepair)); + } + + if (waitees.empty()) { + co_return done(BuildResult::AlreadyValid, assertPathValidity()); + } else { + co_await Suspend{}; + + trace("closure repaired"); + if (nrFailed > 0) + throw Error("some paths in the output closure of derivation '%s' could not be repaired", + worker.store.printStorePath(drvPath)); + co_return done(BuildResult::AlreadyValid, assertPathValidity()); + } +} + + static void chmod_(const Path & path, mode_t mode) { if (chmod(path.c_str(), mode) == -1) @@ -1145,7 +1122,7 @@ HookReply DerivationGoal::tryBuildHook() << 
(worker.getNrLocalBuilds() < settings.maxBuildJobs ? 1 : 0) << drv->platform << worker.store.printStorePath(drvPath) - << parsedDrv->getRequiredSystemFeatures(); + << drvOptions->getRequiredSystemFeatures(*drv); worker.hook->sink.flush(); /* Read the first line of input, which should be a word indicating @@ -1247,7 +1224,7 @@ SingleDrvOutputs DerivationGoal::registerOutputs() to do anything here. We can only early return when the outputs are known a priori. For - floating content-addressed derivations this isn't the case. + floating content-addressing derivations this isn't the case. */ return assertPathValidity(); } diff --git a/src/libstore/build/derivation-goal.hh b/src/libstore/build/derivation-goal.hh index ad3d9ca2a..4622cb2b1 100644 --- a/src/libstore/build/derivation-goal.hh +++ b/src/libstore/build/derivation-goal.hh @@ -2,6 +2,7 @@ ///@file #include "parsed-derivations.hh" +#include "derivation-options.hh" #ifndef _WIN32 # include "user-lock.hh" #endif @@ -80,7 +81,7 @@ struct DerivationGoal : public Goal /** * Mapping from input derivations + output names to actual store * paths. This is filled in by waiteeDone() as each dependency - * finishes, before inputsRealised() is reached. + * finishes, before `trace("all inputs realised")` is reached. */ std::map, StorePath> inputDrvOutputs; @@ -143,6 +144,7 @@ struct DerivationGoal : public Goal std::unique_ptr drv; std::unique_ptr parsedDrv; + std::unique_ptr drvOptions; /** * The remainder is state held during the build. @@ -233,13 +235,8 @@ struct DerivationGoal : public Goal * The states. 
*/ Co init() override; - Co getDerivation(); - Co loadDerivation(); Co haveDerivation(); - Co outputsSubstitutionTried(); Co gaveUpOnSubstitution(); - Co closureRepaired(); - Co inputsRealised(); Co tryToBuild(); virtual Co tryLocalBuild(); Co buildDone(); diff --git a/src/libstore/ca-specific-schema.sql b/src/libstore/ca-specific-schema.sql index 4ca91f585..c5e4e3897 100644 --- a/src/libstore/ca-specific-schema.sql +++ b/src/libstore/ca-specific-schema.sql @@ -1,4 +1,4 @@ --- Extension of the sql schema for content-addressed derivations. +-- Extension of the sql schema for content-addressing derivations. -- Won't be loaded unless the experimental feature `ca-derivations` -- is enabled diff --git a/src/libstore/daemon.cc b/src/libstore/daemon.cc index b921dbe2d..60cb64b7b 100644 --- a/src/libstore/daemon.cc +++ b/src/libstore/daemon.cc @@ -593,7 +593,7 @@ static void performOp(TunnelLogger * logger, ref store, auto drvType = drv.type(); - /* Content-addressed derivations are trustless because their output paths + /* Content-addressing derivations are trustless because their output paths are verified by their content alone, so any derivation is free to try to produce such a path. 
@@ -1041,11 +1041,15 @@ void processConnection( conn.protoVersion = protoVersion; conn.features = features; - auto tunnelLogger = new TunnelLogger(conn.to, protoVersion); - auto prevLogger = nix::logger; + auto tunnelLogger_ = std::make_unique(conn.to, protoVersion); + auto tunnelLogger = tunnelLogger_.get(); + std::unique_ptr prevLogger_; + auto prevLogger = logger.get(); // FIXME - if (!recursive) - logger = tunnelLogger; + if (!recursive) { + prevLogger_ = std::move(logger); + logger = std::move(tunnelLogger_); + } unsigned int opCount = 0; diff --git a/src/libstore/derivation-options.cc b/src/libstore/derivation-options.cc new file mode 100644 index 000000000..1fc1718f7 --- /dev/null +++ b/src/libstore/derivation-options.cc @@ -0,0 +1,274 @@ +#include "derivation-options.hh" +#include "json-utils.hh" +#include "parsed-derivations.hh" +#include "types.hh" +#include "util.hh" +#include +#include +#include + +namespace nix { + +using OutputChecks = DerivationOptions::OutputChecks; + +using OutputChecksVariant = std::variant>; + +DerivationOptions DerivationOptions::fromParsedDerivation(const ParsedDerivation & parsed, bool shouldWarn) +{ + DerivationOptions defaults = {}; + + auto structuredAttrs = parsed.structuredAttrs.get(); + + if (shouldWarn && structuredAttrs) { + if (get(*structuredAttrs, "allowedReferences")) { + warn( + "'structuredAttrs' disables the effect of the top-level attribute 'allowedReferences'; use 'outputChecks' instead"); + } + if (get(*structuredAttrs, "allowedRequisites")) { + warn( + "'structuredAttrs' disables the effect of the top-level attribute 'allowedRequisites'; use 'outputChecks' instead"); + } + if (get(*structuredAttrs, "disallowedRequisites")) { + warn( + "'structuredAttrs' disables the effect of the top-level attribute 'disallowedRequisites'; use 'outputChecks' instead"); + } + if (get(*structuredAttrs, "disallowedReferences")) { + warn( + "'structuredAttrs' disables the effect of the top-level attribute 'disallowedReferences'; 
use 'outputChecks' instead"); + } + if (get(*structuredAttrs, "maxSize")) { + warn( + "'structuredAttrs' disables the effect of the top-level attribute 'maxSize'; use 'outputChecks' instead"); + } + if (get(*structuredAttrs, "maxClosureSize")) { + warn( + "'structuredAttrs' disables the effect of the top-level attribute 'maxClosureSize'; use 'outputChecks' instead"); + } + } + + return { + .outputChecks = [&]() -> OutputChecksVariant { + if (auto structuredAttrs = parsed.structuredAttrs.get()) { + std::map res; + if (auto outputChecks = get(*structuredAttrs, "outputChecks")) { + for (auto & [outputName, output] : getObject(*outputChecks)) { + OutputChecks checks; + + if (auto maxSize = get(output, "maxSize")) + checks.maxSize = maxSize->get(); + + if (auto maxClosureSize = get(output, "maxClosureSize")) + checks.maxClosureSize = maxClosureSize->get(); + + auto get_ = [&](const std::string & name) -> std::optional { + if (auto i = get(output, name)) { + StringSet res; + for (auto j = i->begin(); j != i->end(); ++j) { + if (!j->is_string()) + throw Error("attribute '%s' must be a list of strings", name); + res.insert(j->get()); + } + checks.disallowedRequisites = res; + return res; + } + return {}; + }; + + checks.allowedReferences = get_("allowedReferences"); + checks.allowedRequisites = get_("allowedRequisites"); + checks.disallowedReferences = get_("disallowedReferences").value_or(StringSet{}); + checks.disallowedRequisites = get_("disallowedRequisites").value_or(StringSet{}); + ; + + res.insert_or_assign(outputName, std::move(checks)); + } + } + return res; + } else { + return OutputChecks{ + // legacy non-structured-attributes case + .ignoreSelfRefs = true, + .allowedReferences = parsed.getStringSetAttr("allowedReferences"), + .disallowedReferences = parsed.getStringSetAttr("disallowedReferences").value_or(StringSet{}), + .allowedRequisites = parsed.getStringSetAttr("allowedRequisites"), + .disallowedRequisites = 
parsed.getStringSetAttr("disallowedRequisites").value_or(StringSet{}), + }; + } + }(), + .unsafeDiscardReferences = + [&] { + std::map res; + + if (auto structuredAttrs = parsed.structuredAttrs.get()) { + if (auto udr = get(*structuredAttrs, "unsafeDiscardReferences")) { + for (auto & [outputName, output] : getObject(*udr)) { + if (!output.is_boolean()) + throw Error("attribute 'unsafeDiscardReferences.\"%s\"' must be a Boolean", outputName); + res.insert_or_assign(outputName, output.get()); + } + } + } + + return res; + }(), + .passAsFile = + [&] { + StringSet res; + if (auto * passAsFileString = get(parsed.drv.env, "passAsFile")) { + if (parsed.hasStructuredAttrs()) { + if (shouldWarn) { + warn( + "'structuredAttrs' disables the effect of the top-level attribute 'passAsFile'; because all JSON is always passed via file"); + } + } else { + res = tokenizeString(*passAsFileString); + } + } + return res; + }(), + .additionalSandboxProfile = + parsed.getStringAttr("__sandboxProfile").value_or(defaults.additionalSandboxProfile), + .noChroot = parsed.getBoolAttr("__noChroot", defaults.noChroot), + .impureHostDeps = parsed.getStringSetAttr("__impureHostDeps").value_or(defaults.impureHostDeps), + .impureEnvVars = parsed.getStringSetAttr("impureEnvVars").value_or(defaults.impureEnvVars), + .allowLocalNetworking = parsed.getBoolAttr("__darwinAllowLocalNetworking", defaults.allowLocalNetworking), + .requiredSystemFeatures = + parsed.getStringSetAttr("requiredSystemFeatures").value_or(defaults.requiredSystemFeatures), + .preferLocalBuild = parsed.getBoolAttr("preferLocalBuild", defaults.preferLocalBuild), + .allowSubstitutes = parsed.getBoolAttr("allowSubstitutes", defaults.allowSubstitutes), + }; +} + +StringSet DerivationOptions::getRequiredSystemFeatures(const BasicDerivation & drv) const +{ + // FIXME: cache this? 
+ StringSet res; + for (auto & i : requiredSystemFeatures) + res.insert(i); + if (!drv.type().hasKnownOutputPaths()) + res.insert("ca-derivations"); + return res; +} + +bool DerivationOptions::canBuildLocally(Store & localStore, const BasicDerivation & drv) const +{ + if (drv.platform != settings.thisSystem.get() && !settings.extraPlatforms.get().count(drv.platform) + && !drv.isBuiltin()) + return false; + + if (settings.maxBuildJobs.get() == 0 && !drv.isBuiltin()) + return false; + + for (auto & feature : getRequiredSystemFeatures(drv)) + if (!localStore.systemFeatures.get().count(feature)) + return false; + + return true; +} + +bool DerivationOptions::willBuildLocally(Store & localStore, const BasicDerivation & drv) const +{ + return preferLocalBuild && canBuildLocally(localStore, drv); +} + +bool DerivationOptions::substitutesAllowed() const +{ + return settings.alwaysAllowSubstitutes ? true : allowSubstitutes; +} + +bool DerivationOptions::useUidRange(const BasicDerivation & drv) const +{ + return getRequiredSystemFeatures(drv).count("uid-range"); +} + +} + +namespace nlohmann { + +using namespace nix; + +DerivationOptions adl_serializer::from_json(const json & json) +{ + return { + .outputChecks = [&]() -> OutputChecksVariant { + auto outputChecks = getObject(valueAt(json, "outputChecks")); + + auto forAllOutputsOpt = optionalValueAt(outputChecks, "forAllOutputs"); + auto perOutputOpt = optionalValueAt(outputChecks, "perOutput"); + + if (forAllOutputsOpt && !perOutputOpt) { + return static_cast(*forAllOutputsOpt); + } else if (perOutputOpt && !forAllOutputsOpt) { + return static_cast>(*perOutputOpt); + } else { + throw Error("Exactly one of 'perOutput' or 'forAllOutputs' is required"); + } + }(), + + .unsafeDiscardReferences = valueAt(json, "unsafeDiscardReferences"), + .passAsFile = getStringSet(valueAt(json, "passAsFile")), + + .additionalSandboxProfile = getString(valueAt(json, "additionalSandboxProfile")), + .noChroot = getBoolean(valueAt(json, 
"noChroot")), + .impureHostDeps = getStringSet(valueAt(json, "impureHostDeps")), + .impureEnvVars = getStringSet(valueAt(json, "impureEnvVars")), + .allowLocalNetworking = getBoolean(valueAt(json, "allowLocalNetworking")), + + .requiredSystemFeatures = getStringSet(valueAt(json, "requiredSystemFeatures")), + .preferLocalBuild = getBoolean(valueAt(json, "preferLocalBuild")), + .allowSubstitutes = getBoolean(valueAt(json, "allowSubstitutes")), + }; +} + +void adl_serializer::to_json(json & json, DerivationOptions o) +{ + json["outputChecks"] = std::visit( + overloaded{ + [&](const OutputChecks & checks) { + nlohmann::json outputChecks; + outputChecks["forAllOutputs"] = checks; + return outputChecks; + }, + [&](const std::map & checksPerOutput) { + nlohmann::json outputChecks; + outputChecks["perOutput"] = checksPerOutput; + return outputChecks; + }, + }, + o.outputChecks); + + json["unsafeDiscardReferences"] = o.unsafeDiscardReferences; + json["passAsFile"] = o.passAsFile; + + json["additionalSandboxProfile"] = o.additionalSandboxProfile; + json["noChroot"] = o.noChroot; + json["impureHostDeps"] = o.impureHostDeps; + json["impureEnvVars"] = o.impureEnvVars; + json["allowLocalNetworking"] = o.allowLocalNetworking; + + json["requiredSystemFeatures"] = o.requiredSystemFeatures; + json["preferLocalBuild"] = o.preferLocalBuild; + json["allowSubstitutes"] = o.allowSubstitutes; +} + +DerivationOptions::OutputChecks adl_serializer::from_json(const json & json) +{ + return { + .ignoreSelfRefs = getBoolean(valueAt(json, "ignoreSelfRefs")), + .allowedReferences = nullableValueAt(json, "allowedReferences"), + .disallowedReferences = getStringSet(valueAt(json, "disallowedReferences")), + .allowedRequisites = nullableValueAt(json, "allowedRequisites"), + .disallowedRequisites = getStringSet(valueAt(json, "disallowedRequisites")), + }; +} + +void adl_serializer::to_json(json & json, DerivationOptions::OutputChecks c) +{ + json["ignoreSelfRefs"] = c.ignoreSelfRefs; + 
json["allowedReferences"] = c.allowedReferences; + json["disallowedReferences"] = c.disallowedReferences; + json["allowedRequisites"] = c.allowedRequisites; + json["disallowedRequisites"] = c.disallowedRequisites; +} + +} diff --git a/src/libstore/derivation-options.hh b/src/libstore/derivation-options.hh new file mode 100644 index 000000000..6e4ea5cd9 --- /dev/null +++ b/src/libstore/derivation-options.hh @@ -0,0 +1,185 @@ +#pragma once +///@file + +#include +#include +#include +#include + +#include "types.hh" +#include "json-impls.hh" + +namespace nix { + +class Store; +struct BasicDerivation; +class ParsedDerivation; + +/** + * This represents all the special options on a `Derivation`. + * + * Currently, these options are parsed from the environment variables + * with the aid of `ParsedDerivation`. + * + * The first goal of this data type is to make sure that no other code + * uses `ParsedDerivation` to ad-hoc parse some additional options. That + * ensures this data type is up to date and fully correct. + * + * The second goal of this data type is to allow an alternative to + * hackily parsing the options from the environment variables. The ATerm + * format cannot change, but in alternatives to it (like the JSON + * format), we have the option of instead storing the options + * separately. That would be nice to separate concerns, and not make any + * environment variable names magical. + */ +struct DerivationOptions +{ + struct OutputChecks + { + bool ignoreSelfRefs = false; + std::optional maxSize, maxClosureSize; + + /** + * env: allowedReferences + * + * A value of `nullopt` indicates that the check is skipped. + * This means that all references are allowed. + */ + std::optional allowedReferences; + + /** + * env: disallowedReferences + * + * No needed for `std::optional`, because skipping the check is + * the same as disallowing the references. 
+ */ + StringSet disallowedReferences; + + /** + * env: allowedRequisites + * + * See `allowedReferences` + */ + std::optional allowedRequisites; + + /** + * env: disallowedRequisites + * + * See `disallowedReferences` + */ + StringSet disallowedRequisites; + + bool operator==(const OutputChecks &) const = default; + }; + + /** + * Either one set of checks for all outputs, or separate checks + * per-output. + */ + std::variant> outputChecks = OutputChecks{}; + + /** + * Whether to avoid scanning for references for a given output. + */ + std::map unsafeDiscardReferences; + + /** + * In non-structured mode, all bindings specified in the derivation + * go directly via the environment, except those listed in the + * passAsFile attribute. Those are instead passed as file names + * pointing to temporary files containing the contents. + * + * Note that passAsFile is ignored in structure mode because it's + * not needed (attributes are not passed through the environment, so + * there is no size constraint). + */ + StringSet passAsFile; + + /** + * env: __sandboxProfile + * + * Just for Darwin + */ + std::string additionalSandboxProfile = ""; + + /** + * env: __noChroot + * + * Derivation would like to opt out of the sandbox. + * + * Builder is free to not respect this wish (because it is + * insecure) and fail the build instead. 
+ */ + bool noChroot = false; + + /** + * env: __impureHostDeps + */ + StringSet impureHostDeps = {}; + + /** + * env: impureEnvVars + */ + StringSet impureEnvVars = {}; + + /** + * env: __darwinAllowLocalNetworking + * + * Just for Darwin + */ + bool allowLocalNetworking = false; + + /** + * env: requiredSystemFeatures + */ + StringSet requiredSystemFeatures = {}; + + /** + * env: preferLocalBuild + */ + bool preferLocalBuild = false; + + /** + * env: allowSubstitutes + */ + bool allowSubstitutes = true; + + bool operator==(const DerivationOptions &) const = default; + + /** + * Parse this information from its legacy encoding as part of the + * environment. This should not be used with nice greenfield formats + * (e.g. JSON) but is necessary for supporing old formats (e.g. + * ATerm). + */ + static DerivationOptions fromParsedDerivation(const ParsedDerivation & parsed, bool shouldWarn = true); + + /** + * @param drv Must be the same derivation we parsed this from. In + * the future we'll flip things around so a `BasicDerivation` has + * `DerivationOptions` instead. 
+ */ + StringSet getRequiredSystemFeatures(const BasicDerivation & drv) const; + + /** + * @param drv See note on `getRequiredSystemFeatures` + */ + bool canBuildLocally(Store & localStore, const BasicDerivation & drv) const; + + /** + * @param drv See note on `getRequiredSystemFeatures` + */ + bool willBuildLocally(Store & localStore, const BasicDerivation & drv) const; + + bool substitutesAllowed() const; + + /** + * @param drv See note on `getRequiredSystemFeatures` + */ + bool useUidRange(const BasicDerivation & drv) const; +}; + +}; + +JSON_IMPL(DerivationOptions); +JSON_IMPL(DerivationOptions::OutputChecks) diff --git a/src/libstore/derivations.cc b/src/libstore/derivations.cc index 1f37b0c38..b54838a0a 100644 --- a/src/libstore/derivations.cc +++ b/src/libstore/derivations.cc @@ -300,7 +300,7 @@ static DerivationOutput parseDerivationOutput( } else { xpSettings.require(Xp::CaDerivations); if (pathS != "") - throw FormatError("content-addressed derivation output should not specify output path"); + throw FormatError("content-addressing derivation output should not specify output path"); return DerivationOutput::CAFloating { .method = std::move(method), .hashAlgo = std::move(hashAlgo), @@ -843,16 +843,6 @@ DrvHash hashDerivationModulo(Store & store, const Derivation & drv, bool maskOut }; } - if (type.isImpure()) { - std::map outputHashes; - for (const auto & [outputName, _] : drv.outputs) - outputHashes.insert_or_assign(outputName, impureOutputHash); - return DrvHash { - .hashes = outputHashes, - .kind = DrvHash::Kind::Deferred, - }; - } - auto kind = std::visit(overloaded { [](const DerivationType::InputAddressed & ia) { /* This might be a "pesimistically" deferred output, so we don't @@ -865,7 +855,7 @@ DrvHash hashDerivationModulo(Store & store, const Derivation & drv, bool maskOut : DrvHash::Kind::Deferred; }, [](const DerivationType::Impure &) -> DrvHash::Kind { - assert(false); + return DrvHash::Kind::Deferred; } }, drv.type().raw); diff --git 
a/src/libstore/derivations.hh b/src/libstore/derivations.hh index 765b66ade..5b2101ed5 100644 --- a/src/libstore/derivations.hh +++ b/src/libstore/derivations.hh @@ -187,7 +187,7 @@ struct DerivationType { }; /** - * Content-addressed derivation types + * Content-addressing derivation types */ struct ContentAddressed { /** @@ -526,6 +526,4 @@ void writeDerivation(Sink & out, const StoreDirConfig & store, const BasicDeriva */ std::string hashPlaceholder(const OutputNameView outputName); -extern const Hash impureOutputHash; - } diff --git a/src/libstore/filetransfer.cc b/src/libstore/filetransfer.cc index 28a437e56..1e7ee8c37 100644 --- a/src/libstore/filetransfer.cc +++ b/src/libstore/filetransfer.cc @@ -94,7 +94,7 @@ struct curlFileTransfer : public FileTransfer : fileTransfer(fileTransfer) , request(request) , act(*logger, lvlTalkative, actFileTransfer, - fmt(request.data ? "uploading '%s'" : "downloading '%s'", request.uri), + request.post ? "" : fmt(request.data ? "uploading '%s'" : "downloading '%s'", request.uri), {request.uri}, request.parentAct) , callback(std::move(callback)) , finalSink([this](std::string_view data) { @@ -261,7 +261,7 @@ struct curlFileTransfer : public FileTransfer return ((TransferItem *) userp)->headerCallback(contents, size, nmemb); } - int progressCallback(double dltotal, double dlnow) + int progressCallback(curl_off_t dltotal, curl_off_t dlnow) { try { act.progress(dlnow, dltotal); @@ -271,11 +271,21 @@ struct curlFileTransfer : public FileTransfer return getInterrupted(); } - static int progressCallbackWrapper(void * userp, double dltotal, double dlnow, double ultotal, double ulnow) + int silentProgressCallback(curl_off_t dltotal, curl_off_t dlnow) + { + return getInterrupted(); + } + + static int progressCallbackWrapper(void * userp, curl_off_t dltotal, curl_off_t dlnow, curl_off_t ultotal, curl_off_t ulnow) { return ((TransferItem *) userp)->progressCallback(dltotal, dlnow); } + static int silentProgressCallbackWrapper(void * 
userp, curl_off_t dltotal, curl_off_t dlnow, curl_off_t ultotal, curl_off_t ulnow) + { + return ((TransferItem *) userp)->silentProgressCallback(dltotal, dlnow); + } + static int debugCallback(CURL * handle, curl_infotype type, char * data, size_t size, void * userptr) { if (type == CURLINFO_TEXT) @@ -342,8 +352,11 @@ struct curlFileTransfer : public FileTransfer curl_easy_setopt(req, CURLOPT_HEADERFUNCTION, TransferItem::headerCallbackWrapper); curl_easy_setopt(req, CURLOPT_HEADERDATA, this); - curl_easy_setopt(req, CURLOPT_PROGRESSFUNCTION, progressCallbackWrapper); - curl_easy_setopt(req, CURLOPT_PROGRESSDATA, this); + if (request.post) + curl_easy_setopt(req, CURLOPT_XFERINFOFUNCTION, silentProgressCallbackWrapper); + else + curl_easy_setopt(req, CURLOPT_XFERINFOFUNCTION, progressCallbackWrapper); + curl_easy_setopt(req, CURLOPT_XFERINFODATA, this); curl_easy_setopt(req, CURLOPT_NOPROGRESS, 0); curl_easy_setopt(req, CURLOPT_HTTPHEADER, requestHeaders); @@ -355,7 +368,10 @@ struct curlFileTransfer : public FileTransfer curl_easy_setopt(req, CURLOPT_NOBODY, 1); if (request.data) { - curl_easy_setopt(req, CURLOPT_UPLOAD, 1L); + if (request.post) + curl_easy_setopt(req, CURLOPT_POST, 1L); + else + curl_easy_setopt(req, CURLOPT_UPLOAD, 1L); curl_easy_setopt(req, CURLOPT_READFUNCTION, readCallbackWrapper); curl_easy_setopt(req, CURLOPT_READDATA, this); curl_easy_setopt(req, CURLOPT_INFILESIZE_LARGE, (curl_off_t) request.data->length()); @@ -432,7 +448,8 @@ struct curlFileTransfer : public FileTransfer if (httpStatus == 304 && result.etag == "") result.etag = request.expectedETag; - act.progress(result.bodySize, result.bodySize); + if (!request.post) + act.progress(result.bodySize, result.bodySize); done = true; callback(std::move(result)); } diff --git a/src/libstore/filetransfer.hh b/src/libstore/filetransfer.hh index 43a384d71..0ecc7f376 100644 --- a/src/libstore/filetransfer.hh +++ b/src/libstore/filetransfer.hh @@ -65,6 +65,7 @@ struct FileTransferRequest 
std::string expectedETag; bool verifyTLS = true; bool head = false; + bool post = false; size_t tries = fileTransferSettings.tries; unsigned int baseRetryTimeMs = 250; ActivityId parentAct; diff --git a/src/libstore/globals.hh b/src/libstore/globals.hh index 30a2f775e..f52b5c4dd 100644 --- a/src/libstore/globals.hh +++ b/src/libstore/globals.hh @@ -184,7 +184,7 @@ public: this, SYSTEM, "system", R"( The system type of the current Nix installation. - Nix will only build a given [derivation](@docroot@/language/derivations.md) locally when its `system` attribute equals any of the values specified here or in [`extra-platforms`](#conf-extra-platforms). + Nix will only build a given [store derivation](@docroot@/glossary.md#gloss-store-derivation) locally when its `system` attribute equals any of the values specified here or in [`extra-platforms`](#conf-extra-platforms). The default value is set when Nix itself is compiled for the system it will run on. The following system types are widely used, as Nix is actively supported on these platforms: @@ -820,7 +820,7 @@ public: R"( System types of executables that can be run on this machine. - Nix will only build a given [derivation](@docroot@/language/derivations.md) locally when its `system` attribute equals any of the values specified here or in the [`system` option](#conf-system). + Nix will only build a given [store derivation](@docroot@/glossary.md#gloss-store-derivation) locally when its `system` attribute equals any of the values specified here or in the [`system` option](#conf-system). Setting this can be useful to build derivations locally on compatible machines: - `i686-linux` executables can be run on `x86_64-linux` machines (set by default) @@ -1059,7 +1059,10 @@ public: 1. `NIX_SSL_CERT_FILE` 2. 
`SSL_CERT_FILE` - )"}; + )", + {}, + // Don't document the machine-specific default value + false}; #if __linux__ Setting filterSyscalls{ diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc index 9a7a941b6..67d5a1dcb 100644 --- a/src/libstore/local-store.cc +++ b/src/libstore/local-store.cc @@ -172,16 +172,15 @@ LocalStore::LocalStore( /* Ensure that the store and its parents are not symlinks. */ if (!settings.allowSymlinkedStore) { - Path path = realStoreDir; - struct stat st; - while (path != "/") { - st = lstat(path); - if (S_ISLNK(st.st_mode)) + std::filesystem::path path = realStoreDir.get(); + std::filesystem::path root = path.root_path(); + while (path != root) { + if (std::filesystem::is_symlink(path)) throw Error( "the path '%1%' is a symlink; " "this is not allowed for the Nix store and its parent directories", path); - path = dirOf(path); + path = path.parent_path(); } } diff --git a/src/libstore/meson.build b/src/libstore/meson.build index 85192c299..063bc43a5 100644 --- a/src/libstore/meson.build +++ b/src/libstore/meson.build @@ -200,6 +200,7 @@ sources = files( 'content-address.cc', 'daemon.cc', 'derivations.cc', + 'derivation-options.cc', 'derived-path-map.cc', 'derived-path.cc', 'downstream-placeholder.cc', @@ -271,6 +272,7 @@ headers = [config_h] + files( 'content-address.hh', 'daemon.hh', 'derivations.hh', + 'derivation-options.hh', 'derived-path-map.hh', 'derived-path.hh', 'downstream-placeholder.hh', diff --git a/src/libstore/misc.cc b/src/libstore/misc.cc index bcc02206b..9d3b24326 100644 --- a/src/libstore/misc.cc +++ b/src/libstore/misc.cc @@ -2,6 +2,7 @@ #include "derivations.hh" #include "parsed-derivations.hh" +#include "derivation-options.hh" #include "globals.hh" #include "store-api.hh" #include "thread-pool.hh" @@ -222,8 +223,9 @@ void Store::queryMissing(const std::vector & targets, auto drv = make_ref(derivationFromPath(drvPath)); ParsedDerivation parsedDrv(StorePath(drvPath), *drv); + DerivationOptions drvOptions 
= DerivationOptions::fromParsedDerivation(parsedDrv); - if (!knownOutputPaths && settings.useSubstitutes && parsedDrv.substitutesAllowed()) { + if (!knownOutputPaths && settings.useSubstitutes && drvOptions.substitutesAllowed()) { experimentalFeatureSettings.require(Xp::CaDerivations); // If there are unknown output paths, attempt to find if the @@ -253,7 +255,7 @@ void Store::queryMissing(const std::vector & targets, } } - if (knownOutputPaths && settings.useSubstitutes && parsedDrv.substitutesAllowed()) { + if (knownOutputPaths && settings.useSubstitutes && drvOptions.substitutesAllowed()) { auto drvState = make_ref>(DrvState(invalid.size())); for (auto & output : invalid) pool.enqueue(std::bind(checkOutput, drvPath, drv, output, drvState)); diff --git a/src/libstore/parsed-derivations.cc b/src/libstore/parsed-derivations.cc index d8459d4d7..b26c36efe 100644 --- a/src/libstore/parsed-derivations.cc +++ b/src/libstore/parsed-derivations.cc @@ -87,47 +87,12 @@ std::optional ParsedDerivation::getStringsAttr(const std::string & name } } -StringSet ParsedDerivation::getRequiredSystemFeatures() const +std::optional ParsedDerivation::getStringSetAttr(const std::string & name) const { - // FIXME: cache this? 
- StringSet res; - for (auto & i : getStringsAttr("requiredSystemFeatures").value_or(Strings())) - res.insert(i); - if (!drv.type().hasKnownOutputPaths()) - res.insert("ca-derivations"); - return res; -} - -bool ParsedDerivation::canBuildLocally(Store & localStore) const -{ - if (drv.platform != settings.thisSystem.get() - && !settings.extraPlatforms.get().count(drv.platform) - && !drv.isBuiltin()) - return false; - - if (settings.maxBuildJobs.get() == 0 - && !drv.isBuiltin()) - return false; - - for (auto & feature : getRequiredSystemFeatures()) - if (!localStore.systemFeatures.get().count(feature)) return false; - - return true; -} - -bool ParsedDerivation::willBuildLocally(Store & localStore) const -{ - return getBoolAttr("preferLocalBuild") && canBuildLocally(localStore); -} - -bool ParsedDerivation::substitutesAllowed() const -{ - return settings.alwaysAllowSubstitutes ? true : getBoolAttr("allowSubstitutes", true); -} - -bool ParsedDerivation::useUidRange() const -{ - return getRequiredSystemFeatures().count("uid-range"); + auto ss = getStringsAttr(name); + return ss + ? 
(std::optional{StringSet{ss->begin(), ss->end()}}) + : (std::optional{}); } static std::regex shVarName("[A-Za-z_][A-Za-z0-9_]*"); @@ -188,7 +153,6 @@ static nlohmann::json pathInfoToJSON( std::optional ParsedDerivation::prepareStructuredAttrs(Store & store, const StorePathSet & inputPaths) { - auto structuredAttrs = getStructuredAttrs(); if (!structuredAttrs) return std::nullopt; auto json = *structuredAttrs; diff --git a/src/libstore/parsed-derivations.hh b/src/libstore/parsed-derivations.hh index 71085a604..51992fa84 100644 --- a/src/libstore/parsed-derivations.hh +++ b/src/libstore/parsed-derivations.hh @@ -8,38 +8,40 @@ namespace nix { +struct DerivationOptions; + class ParsedDerivation { StorePath drvPath; BasicDerivation & drv; std::unique_ptr structuredAttrs; -public: - - ParsedDerivation(const StorePath & drvPath, BasicDerivation & drv); - - ~ParsedDerivation(); - - const nlohmann::json * getStructuredAttrs() const - { - return structuredAttrs.get(); - } - std::optional getStringAttr(const std::string & name) const; bool getBoolAttr(const std::string & name, bool def = false) const; std::optional getStringsAttr(const std::string & name) const; - StringSet getRequiredSystemFeatures() const; + std::optional getStringSetAttr(const std::string & name) const; - bool canBuildLocally(Store & localStore) const; + /** + * Only `DerivationOptions` is allowed to parse individual fields + * from `ParsedDerivation`. This ensure that it includes all + * derivation options, and, the likes of `LocalDerivationGoal` are + * incapable of more ad-hoc options. 
+ */ + friend struct DerivationOptions; - bool willBuildLocally(Store & localStore) const; +public: - bool substitutesAllowed() const; + ParsedDerivation(const StorePath & drvPath, BasicDerivation & drv); - bool useUidRange() const; + ~ParsedDerivation(); + + bool hasStructuredAttrs() const + { + return static_cast(structuredAttrs); + } std::optional prepareStructuredAttrs(Store & store, const StorePathSet & inputPaths); }; diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc index b230079eb..533ea557d 100644 --- a/src/libstore/remote-store.cc +++ b/src/libstore/remote-store.cc @@ -608,7 +608,7 @@ void RemoteStore::queryRealisationUncached(const DrvOutput & id, auto conn(getConnection()); if (GET_PROTOCOL_MINOR(conn->protoVersion) < 27) { - warn("the daemon is too old to support content-addressed derivations, please upgrade it to 2.4"); + warn("the daemon is too old to support content-addressing derivations, please upgrade it to 2.4"); return callback(nullptr); } diff --git a/src/libstore/ssh.cc b/src/libstore/ssh.cc index f47cfbbec..70e6d5dfe 100644 --- a/src/libstore/ssh.cc +++ b/src/libstore/ssh.cc @@ -117,10 +117,10 @@ std::unique_ptr SSHMaster::startCommand( ProcessOptions options; options.dieWithParent = false; + std::unique_ptr loggerSuspension; if (!fakeSSH && !useMaster) { - logger->pause(); + loggerSuspension = std::make_unique(logger->suspend()); } - Finally cleanup = [&]() { logger->resume(); }; conn->sshPid = startProcess([&]() { restoreProcessContext(); @@ -199,8 +199,7 @@ Path SSHMaster::startMaster() ProcessOptions options; options.dieWithParent = false; - logger->pause(); - Finally cleanup = [&]() { logger->resume(); }; + auto suspension = logger->suspend(); if (isMasterRunning()) return state->socketPath; diff --git a/src/libstore/store-api.hh b/src/libstore/store-api.hh index 474dffcb5..2eba88ea0 100644 --- a/src/libstore/store-api.hh +++ b/src/libstore/store-api.hh @@ -715,7 +715,7 @@ public: /** * Given a store path, return 
the realisation actually used in the realisation of this path: - * - If the path is a content-addressed derivation, try to resolve it + * - If the path is a content-addressing derivation, try to resolve it * - Otherwise, find one of its derivers */ std::optional getBuildDerivationPath(const StorePath &); diff --git a/src/libstore/unix/build/local-derivation-goal.cc b/src/libstore/unix/build/local-derivation-goal.cc index 5b9bc0bb0..b4f5c23a4 100644 --- a/src/libstore/unix/build/local-derivation-goal.cc +++ b/src/libstore/unix/build/local-derivation-goal.cc @@ -184,10 +184,6 @@ void LocalDerivationGoal::killSandbox(bool getStats) Goal::Co LocalDerivationGoal::tryLocalBuild() { -#if __APPLE__ - additionalSandboxProfile = parsedDrv->getStringAttr("__sandboxProfile").value_or(""); -#endif - unsigned int curBuilds = worker.getNrLocalBuilds(); if (curBuilds >= settings.maxBuildJobs) { worker.waitForBuildSlot(shared_from_this()); @@ -200,13 +196,12 @@ Goal::Co LocalDerivationGoal::tryLocalBuild() /* Are we doing a chroot build? 
*/ { - auto noChroot = parsedDrv->getBoolAttr("__noChroot"); if (settings.sandboxMode == smEnabled) { - if (noChroot) + if (drvOptions->noChroot) throw Error("derivation '%s' has '__noChroot' set, " "but that's not allowed when 'sandbox' is 'true'", worker.store.printStorePath(drvPath)); #if __APPLE__ - if (additionalSandboxProfile != "") + if (drvOptions->additionalSandboxProfile != "") throw Error("derivation '%s' specifies a sandbox profile, " "but this is only allowed when 'sandbox' is 'relaxed'", worker.store.printStorePath(drvPath)); #endif @@ -215,7 +210,7 @@ Goal::Co LocalDerivationGoal::tryLocalBuild() else if (settings.sandboxMode == smDisabled) useChroot = false; else if (settings.sandboxMode == smRelaxed) - useChroot = derivationType->isSandboxed() && !noChroot; + useChroot = derivationType->isSandboxed() && !drvOptions->noChroot; } auto & localStore = getLocalStore(); @@ -240,7 +235,7 @@ Goal::Co LocalDerivationGoal::tryLocalBuild() if (useBuildUsers()) { if (!buildUser) - buildUser = acquireUserLock(parsedDrv->useUidRange() ? 65536 : 1, useChroot); + buildUser = acquireUserLock(drvOptions->useUidRange(*drv) ? 65536 : 1, useChroot); if (!buildUser) { if (!actLock) @@ -531,13 +526,19 @@ void LocalDerivationGoal::startBuilder() killSandbox(false); /* Right platform? 
*/ - if (!parsedDrv->canBuildLocally(worker.store)) - throw Error("a '%s' with features {%s} is required to build '%s', but I am a '%s' with features {%s}", - drv->platform, - concatStringsSep(", ", parsedDrv->getRequiredSystemFeatures()), - worker.store.printStorePath(drvPath), - settings.thisSystem, - concatStringsSep(", ", worker.store.systemFeatures)); + if (!drvOptions->canBuildLocally(worker.store, *drv)) { + // since aarch64-darwin has Rosetta 2, this user can actually run x86_64-darwin on their hardware - we should tell them to run the command to install Darwin 2 + if (drv->platform == "x86_64-darwin" && settings.thisSystem == "aarch64-darwin") { + throw Error("run `/usr/sbin/softwareupdate --install-rosetta` to enable your %s to run programs for %s", settings.thisSystem, drv->platform); + } else { + throw Error("a '%s' with features {%s} is required to build '%s', but I am a '%s' with features {%s}", + drv->platform, + concatStringsSep(", ", drvOptions->getRequiredSystemFeatures(*drv)), + worker.store.printStorePath(drvPath), + settings.thisSystem, + concatStringsSep(", ", worker.store.systemFeatures)); + } + } /* Create a temporary directory where the build will take place. */ @@ -622,7 +623,7 @@ void LocalDerivationGoal::startBuilder() writeStructuredAttrs(); /* Handle exportReferencesGraph(), if set. */ - if (!parsedDrv->getStructuredAttrs()) { + if (!parsedDrv->hasStructuredAttrs()) { /* The `exportReferencesGraph' feature allows the references graph to be passed to a builder. This attribute should be a list of pairs [name1 path1 name2 path2 ...]. 
The references graph of @@ -696,7 +697,7 @@ void LocalDerivationGoal::startBuilder() PathSet allowedPaths = settings.allowedImpureHostPrefixes; /* This works like the above, except on a per-derivation level */ - auto impurePaths = parsedDrv->getStringsAttr("__impureHostDeps").value_or(Strings()); + auto impurePaths = drvOptions->impureHostDeps; for (auto & i : impurePaths) { bool found = false; @@ -716,7 +717,7 @@ void LocalDerivationGoal::startBuilder() throw Error("derivation '%s' requested impure path '%s', but it was not in allowed-impure-host-deps", worker.store.printStorePath(drvPath), i); - /* Allow files in __impureHostDeps to be missing; e.g. + /* Allow files in drvOptions->impureHostDeps to be missing; e.g. macOS 11+ has no /usr/lib/libSystem*.dylib */ pathsInChroot[i] = {i, true}; } @@ -756,10 +757,10 @@ void LocalDerivationGoal::startBuilder() nobody account. The latter is kind of a hack to support Samba-in-QEMU. */ createDirs(chrootRootDir + "/etc"); - if (parsedDrv->useUidRange()) + if (drvOptions->useUidRange(*drv)) chownToBuilder(chrootRootDir + "/etc"); - if (parsedDrv->useUidRange() && (!buildUser || buildUser->getUIDCount() < 65536)) + if (drvOptions->useUidRange(*drv) && (!buildUser || buildUser->getUIDCount() < 65536)) throw Error("feature 'uid-range' requires the setting '%s' to be enabled", settings.autoAllocateUids.name); /* Declare the build user's group so that programs get a consistent @@ -800,7 +801,7 @@ void LocalDerivationGoal::startBuilder() out. */ for (auto & i : drv->outputsAndOptPaths(worker.store)) { /* If the name isn't known a priori (i.e. floating - content-addressed derivation), the temporary location we use + content-addressing derivation), the temporary location we use should be fresh. Freshness means it is impossible that the path is already in the sandbox, so we don't need to worry about removing it. 
*/ @@ -818,7 +819,7 @@ void LocalDerivationGoal::startBuilder() } #else - if (parsedDrv->useUidRange()) + if (drvOptions->useUidRange(*drv)) throw Error("feature 'uid-range' is not supported on this platform"); #if __APPLE__ /* We don't really have any parent prep work to do (yet?) @@ -828,7 +829,7 @@ void LocalDerivationGoal::startBuilder() #endif #endif } else { - if (parsedDrv->useUidRange()) + if (drvOptions->useUidRange(*drv)) throw Error("feature 'uid-range' is only supported in sandboxed builds"); } @@ -873,7 +874,7 @@ void LocalDerivationGoal::startBuilder() /* Fire up a Nix daemon to process recursive Nix calls from the builder. */ - if (parsedDrv->getRequiredSystemFeatures().count("recursive-nix")) + if (drvOptions->getRequiredSystemFeatures(*drv).count("recursive-nix")) startDaemon(); /* Run the builder. */ @@ -1141,18 +1142,12 @@ void LocalDerivationGoal::initTmpDir() tmpDirInSandbox = tmpDir; #endif - /* In non-structured mode, add all bindings specified in the - derivation via the environment, except those listed in the - passAsFile attribute. Those are passed as file names pointing - to temporary files containing the contents. Note that - passAsFile is ignored in structure mode because it's not - needed (attributes are not passed through the environment, so - there is no size constraint). */ - if (!parsedDrv->getStructuredAttrs()) { - - StringSet passAsFile = tokenizeString(getOr(drv->env, "passAsFile", "")); + /* In non-structured mode, set all bindings either directory in the + environment or via a file, as specified by + `DerivationOptions::passAsFile`. 
*/ + if (!parsedDrv->hasStructuredAttrs()) { for (auto & i : drv->env) { - if (passAsFile.find(i.first) == passAsFile.end()) { + if (drvOptions->passAsFile.find(i.first) == drvOptions->passAsFile.end()) { env[i.first] = i.second; } else { auto hash = hashString(HashAlgorithm::SHA256, i.first); @@ -1229,7 +1224,7 @@ void LocalDerivationGoal::initEnv() if (!impureEnv.empty()) experimentalFeatureSettings.require(Xp::ConfigurableImpureEnv); - for (auto & i : parsedDrv->getStringsAttr("impureEnvVars").value_or(Strings())) { + for (auto & i : drvOptions->impureEnvVars){ auto envVar = impureEnv.find(i); if (envVar != impureEnv.end()) { env[i] = envVar->second; @@ -1989,7 +1984,7 @@ void LocalDerivationGoal::runChild() } /* Make /etc unwritable */ - if (!parsedDrv->useUidRange()) + if (!drvOptions->useUidRange(*drv)) chmod_(chrootRootDir + "/etc", 0555); /* Unshare this mount namespace. This is necessary because @@ -2149,7 +2144,18 @@ void LocalDerivationGoal::runChild() without file-write* allowed, access() incorrectly returns EPERM */ sandboxProfile += "(allow file-read* file-write* process-exec\n"; + + // We create multiple allow lists, to avoid exceeding a limit in the darwin sandbox interpreter. 
+ // See https://github.com/NixOS/nix/issues/4119 + // We split our allow groups approximately at half the actual limit, 1 << 16 + const int breakpoint = sandboxProfile.length() + (1 << 14); for (auto & i : pathsInChroot) { + + if (sandboxProfile.length() >= breakpoint) { + debug("Sandbox break: %d %d", sandboxProfile.length(), breakpoint); + sandboxProfile += ")\n(allow file-read* file-write* process-exec\n"; + } + if (i.first != i.second.source) throw Error( "can't map '%1%' to '%2%': mismatched impure paths not supported on Darwin", @@ -2176,7 +2182,7 @@ void LocalDerivationGoal::runChild() } sandboxProfile += ")\n"; - sandboxProfile += additionalSandboxProfile; + sandboxProfile += drvOptions->additionalSandboxProfile; } else sandboxProfile += #include "sandbox-minimal.sb" @@ -2185,8 +2191,6 @@ void LocalDerivationGoal::runChild() debug("Generated sandbox profile:"); debug(sandboxProfile); - bool allowLocalNetworking = parsedDrv->getBoolAttr("__darwinAllowLocalNetworking"); - /* The tmpDir in scope points at the temporary build directory for our derivation. Some packages try different mechanisms to find temporary directories, so we want to open up a broader place for them to put their files, if needed. */ Path globalTmpDir = canonPath(defaultTempDir(), true); @@ -2199,7 +2203,7 @@ void LocalDerivationGoal::runChild() Strings sandboxArgs; sandboxArgs.push_back("_GLOBAL_TMP_DIR"); sandboxArgs.push_back(globalTmpDir); - if (allowLocalNetworking) { + if (drvOptions->allowLocalNetworking) { sandboxArgs.push_back("_ALLOW_LOCAL_NETWORKING"); sandboxArgs.push_back("1"); } @@ -2219,7 +2223,7 @@ void LocalDerivationGoal::runChild() /* Execute the program. This should not return. */ if (drv->isBuiltin()) { try { - logger = makeJSONLogger(*logger); + logger = makeJSONLogger(getStandardError()); std::map outputs; for (auto & e : drv->outputs) @@ -2291,7 +2295,7 @@ SingleDrvOutputs LocalDerivationGoal::registerOutputs() to do anything here. 
We can only early return when the outputs are known a priori. For - floating content-addressed derivations this isn't the case. + floating content-addressing derivations this isn't the case. */ if (hook) return DerivationGoal::registerOutputs(); @@ -2389,14 +2393,8 @@ SingleDrvOutputs LocalDerivationGoal::registerOutputs() inodesSeen); bool discardReferences = false; - if (auto structuredAttrs = parsedDrv->getStructuredAttrs()) { - if (auto udr = get(*structuredAttrs, "unsafeDiscardReferences")) { - if (auto output = get(*udr, outputName)) { - if (!output->is_boolean()) - throw Error("attribute 'unsafeDiscardReferences.\"%s\"' of derivation '%s' must be a Boolean", outputName, drvPath.to_string()); - discardReferences = output->get(); - } - } + if (auto udr = get(drvOptions->unsafeDiscardReferences, outputName)) { + discardReferences = *udr; } StorePathSet references; @@ -2565,7 +2563,7 @@ SingleDrvOutputs LocalDerivationGoal::registerOutputs() case FileIngestionMethod::Git: { return git::dumpHash( outputHash.hashAlgo, - {getFSSourceAccessor(), CanonPath(tmpDir + "/tmp")}).hash; + {getFSSourceAccessor(), CanonPath(actualPath)}).hash; } } assert(false); @@ -2867,13 +2865,6 @@ void LocalDerivationGoal::checkOutputs(const std::map maxSize, maxClosureSize; - std::optional allowedReferences, allowedRequisites, disallowedReferences, disallowedRequisites; - }; - /* Compute the closure and closure size of some output. This is slightly tricky because some of its references (namely other outputs) may not be valid yet. */ @@ -2905,7 +2896,7 @@ void LocalDerivationGoal::checkOutputs(const std::map *checks.maxSize) throw BuildError("path '%s' is too large at %d bytes; limit is %d bytes", @@ -2918,15 +2909,13 @@ void LocalDerivationGoal::checkOutputs(const std::map & value, bool allowed, bool recursive) + auto checkRefs = [&](const StringSet & value, bool allowed, bool recursive) { - if (!value) return; - /* Parse a list of reference specifiers. 
Each element must either be a store path, or the symbolic name of the output of the derivation (such as `out'). */ StorePathSet spec; - for (auto & i : *value) { + for (auto & i : value) { if (worker.store.isStorePath(i)) spec.insert(worker.store.parseStorePath(i)); else if (auto output = get(outputs, i)) @@ -2964,73 +2953,35 @@ void LocalDerivationGoal::checkOutputs(const std::mapgetStructuredAttrs()) { - if (get(*structuredAttrs, "allowedReferences")){ - warn("'structuredAttrs' disables the effect of the top-level attribute 'allowedReferences'; use 'outputChecks' instead"); - } - if (get(*structuredAttrs, "allowedRequisites")){ - warn("'structuredAttrs' disables the effect of the top-level attribute 'allowedRequisites'; use 'outputChecks' instead"); - } - if (get(*structuredAttrs, "disallowedRequisites")){ - warn("'structuredAttrs' disables the effect of the top-level attribute 'disallowedRequisites'; use 'outputChecks' instead"); - } - if (get(*structuredAttrs, "disallowedReferences")){ - warn("'structuredAttrs' disables the effect of the top-level attribute 'disallowedReferences'; use 'outputChecks' instead"); - } - if (get(*structuredAttrs, "maxSize")){ - warn("'structuredAttrs' disables the effect of the top-level attribute 'maxSize'; use 'outputChecks' instead"); - } - if (get(*structuredAttrs, "maxClosureSize")){ - warn("'structuredAttrs' disables the effect of the top-level attribute 'maxClosureSize'; use 'outputChecks' instead"); - } - if (auto outputChecks = get(*structuredAttrs, "outputChecks")) { - if (auto output = get(*outputChecks, outputName)) { - Checks checks; + std::visit(overloaded{ + [&](const DerivationOptions::OutputChecks & checks) { + applyChecks(checks); + }, + [&](const std::map & checksPerOutput) { + if (auto outputChecks = get(checksPerOutput, outputName)) - if (auto maxSize = get(*output, "maxSize")) - checks.maxSize = maxSize->get(); - - if (auto maxClosureSize = get(*output, "maxClosureSize")) - checks.maxClosureSize = 
maxClosureSize->get(); - - auto get_ = [&](const std::string & name) -> std::optional { - if (auto i = get(*output, name)) { - Strings res; - for (auto j = i->begin(); j != i->end(); ++j) { - if (!j->is_string()) - throw Error("attribute '%s' of derivation '%s' must be a list of strings", name, worker.store.printStorePath(drvPath)); - res.push_back(j->get()); - } - checks.disallowedRequisites = res; - return res; - } - return {}; - }; - - checks.allowedReferences = get_("allowedReferences"); - checks.allowedRequisites = get_("allowedRequisites"); - checks.disallowedReferences = get_("disallowedReferences"); - checks.disallowedRequisites = get_("disallowedRequisites"); - - applyChecks(checks); - } - } - } else { - // legacy non-structured-attributes case - Checks checks; - checks.ignoreSelfRefs = true; - checks.allowedReferences = parsedDrv->getStringsAttr("allowedReferences"); - checks.allowedRequisites = parsedDrv->getStringsAttr("allowedRequisites"); - checks.disallowedReferences = parsedDrv->getStringsAttr("disallowedReferences"); - checks.disallowedRequisites = parsedDrv->getStringsAttr("disallowedRequisites"); - applyChecks(checks); - } + applyChecks(*outputChecks); + }, + }, drvOptions->outputChecks); } } diff --git a/src/libstore/unix/build/local-derivation-goal.hh b/src/libstore/unix/build/local-derivation-goal.hh index 1ea247661..c7a129f90 100644 --- a/src/libstore/unix/build/local-derivation-goal.hh +++ b/src/libstore/unix/build/local-derivation-goal.hh @@ -109,11 +109,6 @@ struct LocalDerivationGoal : public DerivationGoal typedef map Environment; Environment env; -#if __APPLE__ - typedef std::string SandboxProfile; - SandboxProfile additionalSandboxProfile; -#endif - /** * Hash rewriting. */ @@ -130,7 +125,7 @@ struct LocalDerivationGoal : public DerivationGoal * rewrite after the build. Otherwise the regular predetermined paths are * put here. 
* - * - Floating content-addressed derivations do not know their final build + * - Floating content-addressing derivations do not know their final build * output paths until the outputs are hashed, so random locations are * used, and then renamed. The randomness helps guard against hidden * self-references. diff --git a/src/libutil-tests/hash.cc b/src/libutil-tests/hash.cc index a88994d0b..3a639aef9 100644 --- a/src/libutil-tests/hash.cc +++ b/src/libutil-tests/hash.cc @@ -6,10 +6,52 @@ namespace nix { +class BLAKE3HashTest : public virtual ::testing::Test +{ +public: + + /** + * We set these in tests rather than the regular globals so we don't have + * to worry about race conditions if the tests run concurrently. + */ + ExperimentalFeatureSettings mockXpSettings; + +private: + + void SetUp() override + { + mockXpSettings.set("experimental-features", "blake3-hashes"); + } +}; + /* ---------------------------------------------------------------------------- * hashString * --------------------------------------------------------------------------*/ + TEST_F(BLAKE3HashTest, testKnownBLAKE3Hashes1) { + // values taken from: https://tools.ietf.org/html/rfc4634 + auto s = "abc"; + auto hash = hashString(HashAlgorithm::BLAKE3, s, mockXpSettings); + ASSERT_EQ(hash.to_string(HashFormat::Base16, true), + "blake3:6437b3ac38465133ffb63b75273a8db548c558465d79db03fd359c6cd5bd9d85"); + } + + TEST_F(BLAKE3HashTest, testKnownBLAKE3Hashes2) { + // values taken from: https://tools.ietf.org/html/rfc4634 + auto s = "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"; + auto hash = hashString(HashAlgorithm::BLAKE3, s, mockXpSettings); + ASSERT_EQ(hash.to_string(HashFormat::Base16, true), + "blake3:c19012cc2aaf0dc3d8e5c45a1b79114d2df42abb2a410bf54be09e891af06ff8"); + } + + TEST_F(BLAKE3HashTest, testKnownBLAKE3Hashes3) { + // values taken from: https://www.ietf.org/archive/id/draft-aumasson-blake3-00.txt + auto s = "IETF"; + auto hash = hashString(HashAlgorithm::BLAKE3, s, 
mockXpSettings); + ASSERT_EQ(hash.to_string(HashFormat::Base16, true), + "blake3:83a2de1ee6f4e6ab686889248f4ec0cf4cc5709446a682ffd1cbb4d6165181e2"); + } + TEST(hashString, testKnownMD5Hashes1) { // values taken from: https://tools.ietf.org/html/rfc1321 auto s1 = ""; diff --git a/src/libutil-tests/monitorfdhup.cc b/src/libutil-tests/monitorfdhup.cc new file mode 100644 index 000000000..01ecb92d9 --- /dev/null +++ b/src/libutil-tests/monitorfdhup.cc @@ -0,0 +1,18 @@ +#include "util.hh" +#include "monitor-fd.hh" + +#include +#include + +namespace nix { +TEST(MonitorFdHup, shouldNotBlock) +{ + Pipe p; + p.create(); + { + // when monitor gets destroyed it should cancel the + // background thread and do not block + MonitorFdHup monitor(p.readSide.get()); + } +} +} diff --git a/src/libutil/current-process.cc b/src/libutil/current-process.cc index ac01f441e..255ae2cf5 100644 --- a/src/libutil/current-process.cc +++ b/src/libutil/current-process.cc @@ -19,10 +19,6 @@ # include "namespaces.hh" #endif -#ifndef _WIN32 -# include -#endif - namespace nix { unsigned int getMaxCPU() @@ -55,11 +51,11 @@ unsigned int getMaxCPU() ////////////////////////////////////////////////////////////////////// +#ifndef _WIN32 size_t savedStackSize = 0; void setStackSize(size_t stackSize) { - #ifndef _WIN32 struct rlimit limit; if (getrlimit(RLIMIT_STACK, &limit) == 0 && limit.rlim_cur < stackSize) { savedStackSize = limit.rlim_cur; @@ -77,31 +73,8 @@ void setStackSize(size_t stackSize) ); } } - #else - ULONG_PTR stackLow, stackHigh; - GetCurrentThreadStackLimits(&stackLow, &stackHigh); - ULONG maxStackSize = stackHigh - stackLow; - ULONG currStackSize = 0; - // This retrieves the current promised stack size - SetThreadStackGuarantee(&currStackSize); - if (currStackSize < stackSize) { - savedStackSize = currStackSize; - ULONG newStackSize = std::min(static_cast(stackSize), maxStackSize); - if (SetThreadStackGuarantee(&newStackSize) == 0) { - logger->log( - lvlError, - HintFmt( - "Failed to 
increase stack size from %1% to %2% (maximum allowed stack size: %3%): %4%", - savedStackSize, - stackSize, - maxStackSize, - std::to_string(GetLastError()) - ).str() - ); - } - } - #endif } +#endif void restoreProcessContext(bool restoreMounts) { diff --git a/src/libutil/current-process.hh b/src/libutil/current-process.hh index 8286bf89d..660dcfe0b 100644 --- a/src/libutil/current-process.hh +++ b/src/libutil/current-process.hh @@ -17,10 +17,13 @@ namespace nix { */ unsigned int getMaxCPU(); +// It does not seem possible to dynamically change stack size on Windows. +#ifndef _WIN32 /** * Change the stack size. */ void setStackSize(size_t stackSize); +#endif /** * Restore the original inherited Unix process context (such as signal diff --git a/src/libutil/error.hh b/src/libutil/error.hh index 58d902622..04fa18e35 100644 --- a/src/libutil/error.hh +++ b/src/libutil/error.hh @@ -50,6 +50,14 @@ struct LinesOfCode { std::optional nextLineOfCode; }; +/* NOTE: position.hh recursively depends on source-path.hh -> source-accessor.hh + -> hash.hh -> config.hh -> experimental-features.hh -> error.hh -> Pos. + There are other such cycles. + Thus, Pos has to be an incomplete type in this header. But since ErrorInfo/Trace + have to refer to Pos, they have to use pointer indirection via std::shared_ptr + to break the recursive header dependency. + FIXME: Untangle this mess. Should there be AbstractPos as there used to be before + 4feb7d9f71? */ struct Pos; void printCodeLines(std::ostream & out, diff --git a/src/libutil/experimental-features.cc b/src/libutil/experimental-features.cc index df70adaa6..b7b8d9c6f 100644 --- a/src/libutil/experimental-features.cc +++ b/src/libutil/experimental-features.cc @@ -24,7 +24,7 @@ struct ExperimentalFeatureDetails * feature, we either have no issue at all if few features are not added * at the end of the list, or a proper merge conflict if they are. 
*/ -constexpr size_t numXpFeatures = 1 + static_cast(Xp::PipeOperators); +constexpr size_t numXpFeatures = 1 + static_cast(Xp::BLAKE3Hashes); constexpr std::array xpFeatureDetails = {{ { @@ -109,6 +109,8 @@ constexpr std::array xpFeatureDetails runCommand "foo" { + # Optional: let Nix know "foo" requires the experimental feature + requiredSystemFeatures = [ "recursive-nix" ]; buildInputs = [ nix jq ]; NIX_PATH = "nixpkgs=${}"; } @@ -286,6 +288,14 @@ constexpr std::array xpFeatureDetails )", .trackingUrl = "https://github.com/NixOS/nix/milestone/55", }, + { + .tag = Xp::BLAKE3Hashes, + .name = "blake3-hashes", + .description = R"( + Enables support for BLAKE3 hashes. + )", + .trackingUrl = "", + }, }}; static_assert( diff --git a/src/libutil/experimental-features.hh b/src/libutil/experimental-features.hh index 319b8cb11..e349b5031 100644 --- a/src/libutil/experimental-features.hh +++ b/src/libutil/experimental-features.hh @@ -35,6 +35,7 @@ enum struct ExperimentalFeature MountedSSHStore, VerifiedFetches, PipeOperators, + BLAKE3Hashes, }; extern std::set stabilizedFeatures; diff --git a/src/libutil/file-descriptor.cc b/src/libutil/file-descriptor.cc index 542c33f3b..707c0f882 100644 --- a/src/libutil/file-descriptor.cc +++ b/src/libutil/file-descriptor.cc @@ -1,6 +1,3 @@ -#include "file-system.hh" -#include "signals.hh" -#include "finally.hh" #include "serialise.hh" #include "util.hh" diff --git a/src/libutil/hash.cc b/src/libutil/hash.cc index b69dec685..6a7a8b092 100644 --- a/src/libutil/hash.cc +++ b/src/libutil/hash.cc @@ -1,6 +1,7 @@ #include #include +#include #include #include #include @@ -8,6 +9,7 @@ #include "args.hh" #include "hash.hh" #include "archive.hh" +#include "config.hh" #include "split.hh" #include @@ -20,6 +22,7 @@ namespace nix { static size_t regularHashSize(HashAlgorithm type) { switch (type) { + case HashAlgorithm::BLAKE3: return blake3HashSize; case HashAlgorithm::MD5: return md5HashSize; case HashAlgorithm::SHA1: return sha1HashSize; case 
HashAlgorithm::SHA256: return sha256HashSize; @@ -29,12 +32,15 @@ static size_t regularHashSize(HashAlgorithm type) { } -const std::set hashAlgorithms = {"md5", "sha1", "sha256", "sha512" }; +const std::set hashAlgorithms = {"blake3", "md5", "sha1", "sha256", "sha512" }; const std::set hashFormats = {"base64", "nix32", "base16", "sri" }; -Hash::Hash(HashAlgorithm algo) : algo(algo) +Hash::Hash(HashAlgorithm algo, const ExperimentalFeatureSettings & xpSettings) : algo(algo) { + if (algo == HashAlgorithm::BLAKE3) { + xpSettings.require(Xp::BLAKE3Hashes); + } hashSize = regularHashSize(algo); assert(hashSize <= maxHashSize); memset(hash, 0, maxHashSize); @@ -284,6 +290,7 @@ Hash newHashAllowEmpty(std::string_view hashStr, std::optional ha union Ctx { + blake3_hasher blake3; MD5_CTX md5; SHA_CTX sha1; SHA256_CTX sha256; @@ -293,7 +300,8 @@ union Ctx static void start(HashAlgorithm ha, Ctx & ctx) { - if (ha == HashAlgorithm::MD5) MD5_Init(&ctx.md5); + if (ha == HashAlgorithm::BLAKE3) blake3_hasher_init(&ctx.blake3); + else if (ha == HashAlgorithm::MD5) MD5_Init(&ctx.md5); else if (ha == HashAlgorithm::SHA1) SHA1_Init(&ctx.sha1); else if (ha == HashAlgorithm::SHA256) SHA256_Init(&ctx.sha256); else if (ha == HashAlgorithm::SHA512) SHA512_Init(&ctx.sha512); @@ -303,7 +311,8 @@ static void start(HashAlgorithm ha, Ctx & ctx) static void update(HashAlgorithm ha, Ctx & ctx, std::string_view data) { - if (ha == HashAlgorithm::MD5) MD5_Update(&ctx.md5, data.data(), data.size()); + if (ha == HashAlgorithm::BLAKE3) blake3_hasher_update(&ctx.blake3, data.data(), data.size()); + else if (ha == HashAlgorithm::MD5) MD5_Update(&ctx.md5, data.data(), data.size()); else if (ha == HashAlgorithm::SHA1) SHA1_Update(&ctx.sha1, data.data(), data.size()); else if (ha == HashAlgorithm::SHA256) SHA256_Update(&ctx.sha256, data.data(), data.size()); else if (ha == HashAlgorithm::SHA512) SHA512_Update(&ctx.sha512, data.data(), data.size()); @@ -312,24 +321,24 @@ static void update(HashAlgorithm ha, 
Ctx & ctx, static void finish(HashAlgorithm ha, Ctx & ctx, unsigned char * hash) { - if (ha == HashAlgorithm::MD5) MD5_Final(hash, &ctx.md5); + if (ha == HashAlgorithm::BLAKE3) blake3_hasher_finalize(&ctx.blake3, hash, BLAKE3_OUT_LEN); + else if (ha == HashAlgorithm::MD5) MD5_Final(hash, &ctx.md5); else if (ha == HashAlgorithm::SHA1) SHA1_Final(hash, &ctx.sha1); else if (ha == HashAlgorithm::SHA256) SHA256_Final(hash, &ctx.sha256); else if (ha == HashAlgorithm::SHA512) SHA512_Final(hash, &ctx.sha512); } - -Hash hashString(HashAlgorithm ha, std::string_view s) +Hash hashString( + HashAlgorithm ha, std::string_view s, const ExperimentalFeatureSettings & xpSettings) { Ctx ctx; - Hash hash(ha); + Hash hash(ha, xpSettings); start(ha, ctx); update(ha, ctx, s); finish(ha, ctx, hash.hash); return hash; } - Hash hashFile(HashAlgorithm ha, const Path & path) { HashSink sink(ha); @@ -426,6 +435,7 @@ std::string_view printHashFormat(HashFormat HashFormat) std::optional parseHashAlgoOpt(std::string_view s) { + if (s == "blake3") return HashAlgorithm::BLAKE3; if (s == "md5") return HashAlgorithm::MD5; if (s == "sha1") return HashAlgorithm::SHA1; if (s == "sha256") return HashAlgorithm::SHA256; @@ -439,12 +449,13 @@ HashAlgorithm parseHashAlgo(std::string_view s) if (opt_h) return *opt_h; else - throw UsageError("unknown hash algorithm '%1%', expect 'md5', 'sha1', 'sha256', or 'sha512'", s); + throw UsageError("unknown hash algorithm '%1%', expect 'blake3', 'md5', 'sha1', 'sha256', or 'sha512'", s); } std::string_view printHashAlgo(HashAlgorithm ha) { switch (ha) { + case HashAlgorithm::BLAKE3: return "blake3"; case HashAlgorithm::MD5: return "md5"; case HashAlgorithm::SHA1: return "sha1"; case HashAlgorithm::SHA256: return "sha256"; diff --git a/src/libutil/hash.hh b/src/libutil/hash.hh index dc95b9f2f..13d526f42 100644 --- a/src/libutil/hash.hh +++ b/src/libutil/hash.hh @@ -1,6 +1,7 @@ #pragma once ///@file +#include "config.hh" #include "types.hh" #include "serialise.hh" 
#include "file-system.hh" @@ -11,9 +12,9 @@ namespace nix { MakeError(BadHash, Error); -enum struct HashAlgorithm : char { MD5 = 42, SHA1, SHA256, SHA512 }; - +enum struct HashAlgorithm : char { MD5 = 42, SHA1, SHA256, SHA512, BLAKE3 }; +const int blake3HashSize = 32; const int md5HashSize = 16; const int sha1HashSize = 20; const int sha256HashSize = 32; @@ -52,7 +53,7 @@ struct Hash /** * Create a zero-filled hash object. */ - explicit Hash(HashAlgorithm algo); + explicit Hash(HashAlgorithm algo, const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings); /** * Parse the hash from a string representation in the format @@ -157,7 +158,7 @@ std::string printHash16or32(const Hash & hash); /** * Compute the hash of the given string. */ -Hash hashString(HashAlgorithm ha, std::string_view s); +Hash hashString(HashAlgorithm ha, std::string_view s, const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings); /** * Compute the hash of the given file, hashing its contents directly. 
diff --git a/src/libutil/json-utils.cc b/src/libutil/json-utils.cc index dff068e07..f67811e21 100644 --- a/src/libutil/json-utils.cc +++ b/src/libutil/json-utils.cc @@ -3,6 +3,7 @@ #include "types.hh" #include #include +#include namespace nix { @@ -38,6 +39,15 @@ std::optional optionalValueAt(const nlohmann::json::object_t & m return std::optional { map.at(key) }; } +std::optional nullableValueAt(const nlohmann::json::object_t & map, const std::string & key) +{ + auto value = valueAt(map, key); + + if (value.is_null()) + return std::nullopt; + + return std::optional { std::move(value) }; +} const nlohmann::json * getNullable(const nlohmann::json & value) { diff --git a/src/libutil/json-utils.hh b/src/libutil/json-utils.hh index 546334e1e..1afc5d796 100644 --- a/src/libutil/json-utils.hh +++ b/src/libutil/json-utils.hh @@ -25,6 +25,7 @@ const nlohmann::json & valueAt( const std::string & key); std::optional optionalValueAt(const nlohmann::json::object_t & value, const std::string & key); +std::optional nullableValueAt(const nlohmann::json::object_t & value, const std::string & key); /** * Downcast the json object, failing with a nice error if the conversion fails. 
@@ -69,6 +70,9 @@ struct json_avoids_null> : std::true_type {}; template struct json_avoids_null> : std::true_type {}; +template +struct json_avoids_null> : std::true_type {}; + template struct json_avoids_null> : std::true_type {}; diff --git a/src/libutil/logging.cc b/src/libutil/logging.cc index a5add5565..406452738 100644 --- a/src/libutil/logging.cc +++ b/src/libutil/logging.cc @@ -29,7 +29,7 @@ void setCurActivity(const ActivityId activityId) curActivity = activityId; } -Logger * logger = makeSimpleLogger(true); +std::unique_ptr logger = makeSimpleLogger(true); void Logger::warn(const std::string & msg) { @@ -43,6 +43,19 @@ void Logger::writeToStdout(std::string_view s) writeFull(standard_out, "\n"); } +Logger::Suspension Logger::suspend() +{ + pause(); + return Suspension { ._finalize = {[this](){this->resume();}} }; +} + +std::optional Logger::suspendIf(bool cond) +{ + if (cond) + return suspend(); + return {}; +} + class SimpleLogger : public Logger { public: @@ -128,9 +141,9 @@ void writeToStderr(std::string_view s) } } -Logger * makeSimpleLogger(bool printBuildLogs) +std::unique_ptr makeSimpleLogger(bool printBuildLogs) { - return new SimpleLogger(printBuildLogs); + return std::make_unique(printBuildLogs); } std::atomic nextId{0}; @@ -167,9 +180,9 @@ void to_json(nlohmann::json & json, std::shared_ptr pos) } struct JSONLogger : Logger { - Logger & prevLogger; + Descriptor fd; - JSONLogger(Logger & prevLogger) : prevLogger(prevLogger) { } + JSONLogger(Descriptor fd) : fd(fd) { } bool isVerbose() override { return true; @@ -190,7 +203,7 @@ struct JSONLogger : Logger { void write(const nlohmann::json & json) { - prevLogger.log(lvlError, "@nix " + json.dump(-1, ' ', false, nlohmann::json::error_handler_t::replace)); + writeLine(fd, "@nix " + json.dump(-1, ' ', false, nlohmann::json::error_handler_t::replace)); } void log(Verbosity lvl, std::string_view s) override @@ -262,9 +275,9 @@ struct JSONLogger : Logger { } }; -Logger * makeJSONLogger(Logger & 
prevLogger) +std::unique_ptr makeJSONLogger(Descriptor fd) { - return new JSONLogger(prevLogger); + return std::make_unique(fd); } static Logger::Fields getFields(nlohmann::json & json) diff --git a/src/libutil/logging.hh b/src/libutil/logging.hh index 11e4033a5..e5a7a833f 100644 --- a/src/libutil/logging.hh +++ b/src/libutil/logging.hh @@ -3,6 +3,8 @@ #include "error.hh" #include "config.hh" +#include "file-descriptor.hh" +#include "finally.hh" #include @@ -74,6 +76,17 @@ public: virtual void stop() { }; + /** + * Guard object to resume the logger when done. + */ + struct Suspension { + Finally> _finalize; + }; + + Suspension suspend(); + + std::optional suspendIf(bool cond); + virtual void pause() { }; virtual void resume() { }; @@ -179,11 +192,11 @@ struct PushActivity ~PushActivity() { setCurActivity(prevAct); } }; -extern Logger * logger; +extern std::unique_ptr logger; -Logger * makeSimpleLogger(bool printBuildLogs = true); +std::unique_ptr makeSimpleLogger(bool printBuildLogs = true); -Logger * makeJSONLogger(Logger & prevLogger); +std::unique_ptr makeJSONLogger(Descriptor fd); /** * @param source A noun phrase describing the source of the message, e.g. "the builder". 
diff --git a/src/libutil/meson.build b/src/libutil/meson.build index ac701d8fd..9e70d0549 100644 --- a/src/libutil/meson.build +++ b/src/libutil/meson.build @@ -62,6 +62,12 @@ elif host_machine.system() == 'sunos' deps_other += [socket, network_service_library] endif +blake3 = dependency( + 'libblake3', + version: '>= 1.5.5', +) +deps_private += blake3 + boost = dependency( 'boost', modules : ['context', 'coroutine'], @@ -147,7 +153,9 @@ sources = files( 'json-utils.cc', 'logging.cc', 'memory-source-accessor.cc', + 'mounted-source-accessor.cc', 'position.cc', + 'pos-table.cc', 'posix-source-accessor.cc', 'references.cc', 'serialise.cc', @@ -160,6 +168,7 @@ sources = files( 'tarfile.cc', 'terminal.cc', 'thread-pool.cc', + 'union-source-accessor.cc', 'unix-domain-socket.cc', 'url.cc', 'users.cc', @@ -217,6 +226,8 @@ headers = [config_h] + files( 'muxable-pipe.hh', 'os-string.hh', 'pool.hh', + 'pos-idx.hh', + 'pos-table.hh', 'position.hh', 'posix-source-accessor.hh', 'processes.hh', diff --git a/src/libfetchers/mounted-source-accessor.cc b/src/libutil/mounted-source-accessor.cc similarity index 92% rename from src/libfetchers/mounted-source-accessor.cc rename to src/libutil/mounted-source-accessor.cc index 68f3a546b..79223d155 100644 --- a/src/libfetchers/mounted-source-accessor.cc +++ b/src/libutil/mounted-source-accessor.cc @@ -1,4 +1,4 @@ -#include "mounted-source-accessor.hh" +#include "source-accessor.hh" namespace nix { @@ -23,12 +23,6 @@ struct MountedSourceAccessor : SourceAccessor return accessor->readFile(subpath); } - bool pathExists(const CanonPath & path) override - { - auto [accessor, subpath] = resolve(path); - return accessor->pathExists(subpath); - } - std::optional maybeLstat(const CanonPath & path) override { auto [accessor, subpath] = resolve(path); @@ -69,6 +63,12 @@ struct MountedSourceAccessor : SourceAccessor path.pop(); } } + + std::optional getPhysicalPath(const CanonPath & path) override + { + auto [accessor, subpath] = resolve(path); + 
return accessor->getPhysicalPath(subpath); + } }; ref makeMountedSourceAccessor(std::map> mounts) diff --git a/src/libutil/package.nix b/src/libutil/package.nix index fcc74c247..b85dcd58f 100644 --- a/src/libutil/package.nix +++ b/src/libutil/package.nix @@ -6,6 +6,7 @@ boost, brotli, libarchive, + libblake3, libcpuid, libsodium, nlohmann_json, @@ -42,6 +43,7 @@ mkMesonLibrary (finalAttrs: { buildInputs = [ brotli + libblake3 libsodium openssl ] ++ lib.optional stdenv.hostPlatform.isx86_64 libcpuid; diff --git a/src/libexpr/pos-idx.hh b/src/libutil/pos-idx.hh similarity index 98% rename from src/libexpr/pos-idx.hh rename to src/libutil/pos-idx.hh index 2faa6b7fe..c1749ba69 100644 --- a/src/libexpr/pos-idx.hh +++ b/src/libutil/pos-idx.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include #include diff --git a/src/libutil/pos-table.cc b/src/libutil/pos-table.cc new file mode 100644 index 000000000..8178beb90 --- /dev/null +++ b/src/libutil/pos-table.cc @@ -0,0 +1,37 @@ +#include "pos-table.hh" + +#include + +namespace nix { + +/* Position table. 
*/ + +Pos PosTable::operator[](PosIdx p) const +{ + auto origin = resolve(p); + if (!origin) + return {}; + + const auto offset = origin->offsetOf(p); + + Pos result{0, 0, origin->origin}; + auto lines = this->lines.lock(); + auto linesForInput = (*lines)[origin->offset]; + + if (linesForInput.empty()) { + auto source = result.getSource().value_or(""); + const char * begin = source.data(); + for (Pos::LinesIterator it(source), end; it != end; it++) + linesForInput.push_back(it->data() - begin); + if (linesForInput.empty()) + linesForInput.push_back(0); + } + // as above: the first line starts at byte 0 and is always present + auto lineStartOffset = std::prev(std::upper_bound(linesForInput.begin(), linesForInput.end(), offset)); + + result.line = 1 + (lineStartOffset - linesForInput.begin()); + result.column = 1 + (offset - *lineStartOffset); + return result; +} + +} diff --git a/src/libexpr/pos-table.hh b/src/libutil/pos-table.hh similarity index 77% rename from src/libexpr/pos-table.hh rename to src/libutil/pos-table.hh index ba2b91cf3..a6fe09d79 100644 --- a/src/libexpr/pos-table.hh +++ b/src/libutil/pos-table.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include #include @@ -18,9 +19,12 @@ public: private: uint32_t offset; - Origin(Pos::Origin origin, uint32_t offset, size_t size): - offset(offset), origin(origin), size(size) - {} + Origin(Pos::Origin origin, uint32_t offset, size_t size) + : offset(offset) + , origin(origin) + , size(size) + { + } public: const Pos::Origin origin; @@ -72,6 +76,17 @@ public: return PosIdx(1 + origin.offset + offset); } + /** + * Convert a byte-offset PosIdx into a Pos with line/column information. + * + * @param p Byte offset into the virtual concatenation of all parsed contents + * @return Position + * + * @warning Very expensive to call, as this has to read the entire source + * into memory each time. Call this only if absolutely necessary. 
Prefer + * to keep PosIdx around instead of needlessly converting it into Pos by + * using this lookup method. + */ Pos operator[](PosIdx p) const; Pos::Origin originOf(PosIdx p) const diff --git a/src/libutil/position.cc b/src/libutil/position.cc index 946f167b6..275985c8c 100644 --- a/src/libutil/position.cc +++ b/src/libutil/position.cc @@ -66,6 +66,13 @@ std::optional Pos::getSource() const }, origin); } +std::optional Pos::getSourcePath() const +{ + if (auto * path = std::get_if(&origin)) + return *path; + return std::nullopt; +} + void Pos::print(std::ostream & out, bool showOrigin) const { if (showOrigin) { diff --git a/src/libutil/position.hh b/src/libutil/position.hh index 25217069c..07e261c4c 100644 --- a/src/libutil/position.hh +++ b/src/libutil/position.hh @@ -50,6 +50,7 @@ struct Pos explicit operator bool() const { return line > 0; } + /* TODO: Why std::shared_ptr and not std::shared_ptr? */ operator std::shared_ptr() const; /** @@ -69,9 +70,7 @@ struct Pos /** * Get the SourcePath, if the source was loaded from a file. 
*/ - std::optional getSourcePath() const { - return *std::get_if(&origin); - } + std::optional getSourcePath() const; struct LinesIterator { using difference_type = size_t; diff --git a/src/libutil/serialise.cc b/src/libutil/serialise.cc index 381e7ae38..d612c11b2 100644 --- a/src/libutil/serialise.cc +++ b/src/libutil/serialise.cc @@ -227,8 +227,7 @@ std::unique_ptr sourceToSink(std::function fun) throw EndOfFile("coroutine has finished"); } - size_t n = std::min(cur.size(), out_len); - memcpy(out, cur.data(), n); + size_t n = cur.copy(out, out_len); cur.remove_prefix(n); return n; }); @@ -260,7 +259,7 @@ std::unique_ptr sinkToSource( { struct SinkToSource : Source { - typedef boost::coroutines2::coroutine coro_t; + typedef boost::coroutines2::coroutine coro_t; std::function fun; std::function eof; @@ -271,33 +270,37 @@ std::unique_ptr sinkToSource( { } - std::string cur; - size_t pos = 0; + std::string_view cur; size_t read(char * data, size_t len) override { - if (!coro) { + bool hasCoro = coro.has_value(); + if (!hasCoro) { coro = coro_t::pull_type([&](coro_t::push_type & yield) { LambdaSink sink([&](std::string_view data) { - if (!data.empty()) yield(std::string(data)); + if (!data.empty()) { + yield(data); + } }); fun(sink); }); } - if (!*coro) { eof(); unreachable(); } - - if (pos == cur.size()) { - if (!cur.empty()) { + if (cur.empty()) { + if (hasCoro) { (*coro)(); } - cur = coro->get(); - pos = 0; + if (*coro) { + cur = coro->get(); + } else { + coro.reset(); + eof(); + unreachable(); + } } - auto n = std::min(cur.size() - pos, len); - memcpy(data, cur.data() + pos, n); - pos += n; + size_t n = cur.copy(data, len); + cur.remove_prefix(n); return n; } diff --git a/src/libutil/source-accessor.hh b/src/libutil/source-accessor.hh index 42af8256a..79ae092ac 100644 --- a/src/libutil/source-accessor.hh +++ b/src/libutil/source-accessor.hh @@ -214,4 +214,12 @@ ref getFSSourceAccessor(); */ ref makeFSSourceAccessor(std::filesystem::path root); +ref 
makeMountedSourceAccessor(std::map> mounts); + +/** + * Construct an accessor that presents a "union" view of a vector of + * underlying accessors. Earlier accessors take precedence over later. + */ +ref makeUnionSourceAccessor(std::vector> && accessors); + } diff --git a/src/libutil/union-source-accessor.cc b/src/libutil/union-source-accessor.cc new file mode 100644 index 000000000..eec0850c2 --- /dev/null +++ b/src/libutil/union-source-accessor.cc @@ -0,0 +1,82 @@ +#include "source-accessor.hh" + +namespace nix { + +struct UnionSourceAccessor : SourceAccessor +{ + std::vector> accessors; + + UnionSourceAccessor(std::vector> _accessors) + : accessors(std::move(_accessors)) + { + displayPrefix.clear(); + } + + std::string readFile(const CanonPath & path) override + { + for (auto & accessor : accessors) { + auto st = accessor->maybeLstat(path); + if (st) + return accessor->readFile(path); + } + throw FileNotFound("path '%s' does not exist", showPath(path)); + } + + std::optional maybeLstat(const CanonPath & path) override + { + for (auto & accessor : accessors) { + auto st = accessor->maybeLstat(path); + if (st) + return st; + } + return std::nullopt; + } + + DirEntries readDirectory(const CanonPath & path) override + { + DirEntries result; + for (auto & accessor : accessors) { + auto st = accessor->maybeLstat(path); + if (!st) + continue; + for (auto & entry : accessor->readDirectory(path)) + // Don't override entries from previous accessors. 
+ result.insert(entry); + } + return result; + } + + std::string readLink(const CanonPath & path) override + { + for (auto & accessor : accessors) { + auto st = accessor->maybeLstat(path); + if (st) + return accessor->readLink(path); + } + throw FileNotFound("path '%s' does not exist", showPath(path)); + } + + std::string showPath(const CanonPath & path) override + { + for (auto & accessor : accessors) + return accessor->showPath(path); + return SourceAccessor::showPath(path); + } + + std::optional getPhysicalPath(const CanonPath & path) override + { + for (auto & accessor : accessors) { + auto p = accessor->getPhysicalPath(path); + if (p) + return p; + } + return std::nullopt; + } +}; + +ref makeUnionSourceAccessor(std::vector> && accessors) +{ + return make_ref(std::move(accessors)); +} + +} diff --git a/src/libutil/unix/file-descriptor.cc b/src/libutil/unix/file-descriptor.cc index ac7c086af..a02a53b1e 100644 --- a/src/libutil/unix/file-descriptor.cc +++ b/src/libutil/unix/file-descriptor.cc @@ -5,9 +5,27 @@ #include #include +#include namespace nix { +namespace { + +// This function is needed to handle non-blocking reads/writes. This is needed in the buildhook, because +// somehow the json logger file descriptor ends up being non-blocking and breaks remote-building. 
+// TODO: get rid of buildhook and remove this function again (https://github.com/NixOS/nix/issues/12688) +void pollFD(int fd, int events) +{ + struct pollfd pfd; + pfd.fd = fd; + pfd.events = events; + int ret = poll(&pfd, 1, -1); + if (ret == -1) { + throw SysError("poll on file descriptor failed"); + } +} +} + std::string readFile(int fd) { struct stat st; @@ -17,14 +35,18 @@ std::string readFile(int fd) return drainFD(fd, true, st.st_size); } - void readFull(int fd, char * buf, size_t count) { while (count) { checkInterrupt(); ssize_t res = read(fd, buf, count); if (res == -1) { - if (errno == EINTR) continue; + switch (errno) { + case EINTR: continue; + case EAGAIN: + pollFD(fd, POLLIN); + continue; + } throw SysError("reading from file"); } if (res == 0) throw EndOfFile("unexpected end-of-file"); @@ -39,8 +61,15 @@ void writeFull(int fd, std::string_view s, bool allowInterrupts) while (!s.empty()) { if (allowInterrupts) checkInterrupt(); ssize_t res = write(fd, s.data(), s.size()); - if (res == -1 && errno != EINTR) + if (res == -1) { + switch (errno) { + case EINTR: continue; + case EAGAIN: + pollFD(fd, POLLOUT); + continue; + } throw SysError("writing to file"); + } if (res > 0) s.remove_prefix(res); } @@ -56,8 +85,15 @@ std::string readLine(int fd, bool eofOk) // FIXME: inefficient ssize_t rd = read(fd, &ch, 1); if (rd == -1) { - if (errno != EINTR) + switch (errno) { + case EINTR: continue; + case EAGAIN: { + pollFD(fd, POLLIN); + continue; + } + default: throw SysError("reading a line"); + } } else if (rd == 0) { if (eofOk) return s; diff --git a/src/libutil/unix/monitor-fd.hh b/src/libutil/unix/monitor-fd.hh index b6610feff..c1f8705eb 100644 --- a/src/libutil/unix/monitor-fd.hh +++ b/src/libutil/unix/monitor-fd.hh @@ -14,35 +14,74 @@ namespace nix { - class MonitorFdHup { private: std::thread thread; + Pipe notifyPipe; public: MonitorFdHup(int fd) { - thread = std::thread([fd]() { + notifyPipe.create(); + thread = std::thread([this, fd]() { while (true) 
{ - /* Wait indefinitely until a POLLHUP occurs. */ - struct pollfd fds[1]; - fds[0].fd = fd; - /* Polling for no specific events (i.e. just waiting - for an error/hangup) doesn't work on macOS - anymore. So wait for read events and ignore - them. */ - fds[0].events = - #ifdef __APPLE__ - POLLRDNORM - #else + // There is a POSIX violation on macOS: you have to listen for + // at least POLLHUP to receive HUP events for a FD. POSIX says + // this is not so, and you should just receive them regardless. + // However, as of our testing on macOS 14.5, the events do not + // get delivered in the all-bits-unset case, but do get + // delivered if `POLLHUP` is set. + // + // This bug is filed as rdar://37537852 + // (https://openradar.appspot.com/37537852). + // + // macOS's own man page + // (https://developer.apple.com/library/archive/documentation/System/Conceptual/ManPages_iPhoneOS/man2/poll.2.html) + // additionally says that `POLLHUP` is ignored as an input. It + // seems the likely order of events here was + // + // 1. macOS did not follow the POSIX spec + // + // 2. Somebody ninja-fixed this other spec violation to make + // sure `POLLHUP` was not forgotten about, even though they + // "fixed" this issue in a spec-non-compliant way. Whatever, + // we'll use the fix. + // + // Relevant code, current version, which shows the `POLLHUP` check: + // https://github.com/apple-oss-distributions/xnu/blob/94d3b452840153a99b38a3a9659680b2a006908e/bsd/kern/sys_generic.c#L1751-L1758 + // + // The `POLLHUP` detection was added in + // https://github.com/apple-oss-distributions/xnu/commit/e13b1fa57645afc8a7b2e7d868fe9845c6b08c40#diff-a5aa0b0e7f4d866ca417f60702689fc797e9cdfe33b601b05ccf43086c35d395R1468 + // That means added in 2007 or earlier. Should be good enough + // for us. + short hangup_events = +#ifdef __APPLE__ + POLLHUP +#else 0 - #endif +#endif ; - auto count = poll(fds, 1, -1); - if (count == -1) - unreachable(); + /* Wait indefinitely until a POLLHUP occurs. 
*/ + constexpr size_t num_fds = 2; + struct pollfd fds[num_fds] = { + { + .fd = fd, + .events = hangup_events, + }, + { + .fd = notifyPipe.readSide.get(), + .events = hangup_events, + }, + }; + + auto count = poll(fds, num_fds, -1); + if (count == -1) { + if (errno == EINTR || errno == EAGAIN) + continue; + throw SysError("failed to poll() in MonitorFdHup"); + } /* This shouldn't happen, but can on macOS due to a bug. See rdar://37550628. @@ -50,25 +89,42 @@ public: coordination with the main thread if spinning proves too harmful. */ - if (count == 0) continue; + if (count == 0) + continue; if (fds[0].revents & POLLHUP) { unix::triggerInterrupt(); break; } - /* This will only happen on macOS. We sleep a bit to - avoid waking up too often if the client is sending - input. */ - sleep(1); + if (fds[1].revents & POLLHUP) { + break; + } + // On macOS, (jade thinks that) it is possible (although not + // observed on macOS 14.5) that in some limited cases on buggy + // kernel versions, all the non-POLLHUP events for the socket + // get delivered. + // + // We could sleep to avoid pointlessly spinning a thread on + // those, but this opens up a different problem, which is that + // if we do sleep, it will be longer before the daemon fork for a + // client exits. Imagine a sequential shell script, running Nix + // commands, each of which talks to the daemon. If the previous + // command registered a temp root, exits, and then the next + // command issues a delete request before the temp root is + // cleaned up, that delete request might fail. + // + // Not sleeping doesn't actually fix the race condition --- we + // would need to block on the old connections' temp roots being + // cleaned up in the new connection --- but it does make it + // much less likely. 
} }); }; ~MonitorFdHup() { - pthread_cancel(thread.native_handle()); + notifyPipe.writeSide.close(); thread.join(); } }; - } diff --git a/src/libutil/unix/processes.cc b/src/libutil/unix/processes.cc index 43d9179d9..da198bed4 100644 --- a/src/libutil/unix/processes.cc +++ b/src/libutil/unix/processes.cc @@ -200,8 +200,15 @@ static int childEntry(void * arg) pid_t startProcess(std::function fun, const ProcessOptions & options) { ChildWrapperFunction wrapper = [&] { - if (!options.allowVfork) + if (!options.allowVfork) { + /* Set a simple logger, while releasing (not destroying) + the parent logger. We don't want to run the parent + logger's destructor since that will crash (e.g. when + ~ProgressBar() tries to join a thread that doesn't + exist. */ + logger.release(); logger = makeSimpleLogger(); + } try { #if __linux__ if (options.dieWithParent && prctl(PR_SET_PDEATHSIG, SIGKILL) == -1) @@ -299,15 +306,7 @@ void runProgram2(const RunOptions & options) // case), so we can't use it if we alter the environment processOptions.allowVfork = !options.environment; - std::optional>> resumeLoggerDefer; - if (options.isInteractive) { - logger->pause(); - resumeLoggerDefer.emplace( - []() { - logger->resume(); - } - ); - } + auto suspension = logger->suspendIf(options.isInteractive); /* Fork. */ Pid pid = startProcess([&] { diff --git a/src/libutil/windows/processes.cc b/src/libutil/windows/processes.cc index fd4d7c43a..90cb1f5f5 100644 --- a/src/libutil/windows/processes.cc +++ b/src/libutil/windows/processes.cc @@ -312,11 +312,7 @@ void runProgram2(const RunOptions & options) // TODO: Implement shebang / program interpreter lookup on Windows auto interpreter = getProgramInterpreter(realProgram); - std::optional>> resumeLoggerDefer; - if (options.isInteractive) { - logger->pause(); - resumeLoggerDefer.emplace([]() { logger->resume(); }); - } + auto suspension = logger->suspendIf(options.isInteractive); Pid pid = spawnProcess(interpreter.has_value() ? 
*interpreter : realProgram, options, out, in); diff --git a/src/nix-build/nix-build.cc b/src/nix-build/nix-build.cc index 5410f0cab..a5ae12a12 100644 --- a/src/nix-build/nix-build.cc +++ b/src/nix-build/nix-build.cc @@ -11,6 +11,7 @@ #include "current-process.hh" #include "parsed-derivations.hh" +#include "derivation-options.hh" #include "store-api.hh" #include "local-fs-store.hh" #include "globals.hh" @@ -543,12 +544,13 @@ static void main_nix_build(int argc, char * * argv) env["NIX_STORE"] = store->storeDir; env["NIX_BUILD_CORES"] = std::to_string(settings.buildCores); - auto passAsFile = tokenizeString(getOr(drv.env, "passAsFile", "")); + ParsedDerivation parsedDrv(packageInfo.requireDrvPath(), drv); + DerivationOptions drvOptions = DerivationOptions::fromParsedDerivation(parsedDrv); int fileNr = 0; for (auto & var : drv.env) - if (passAsFile.count(var.first)) { + if (drvOptions.passAsFile.count(var.first)) { auto fn = ".attr-" + std::to_string(fileNr++); Path p = (tmpDir.path() / fn).string(); writeFile(p, var.second); @@ -558,7 +560,7 @@ static void main_nix_build(int argc, char * * argv) std::string structuredAttrsRC; - if (env.count("__json")) { + if (parsedDrv.hasStructuredAttrs()) { StorePathSet inputs; std::function::ChildNode &)> accumInputClosure; @@ -576,8 +578,6 @@ static void main_nix_build(int argc, char * * argv) for (const auto & [inputDrv, inputNode] : drv.inputDrvs.map) accumInputClosure(inputDrv, inputNode); - ParsedDerivation parsedDrv(packageInfo.requireDrvPath(), drv); - if (auto structAttrs = parsedDrv.prepareStructuredAttrs(*store, inputs)) { auto json = structAttrs.value(); structuredAttrsRC = writeStructuredAttrsShell(json); diff --git a/src/nix-store/nix-store.cc b/src/nix-store/nix-store.cc index 3fb69a29d..d182b1eee 100644 --- a/src/nix-store/nix-store.cc +++ b/src/nix-store/nix-store.cc @@ -253,7 +253,7 @@ static StorePathSet maybeUseOutputs(const StorePath & storePath, bool useOutput, return store->queryDerivationOutputs(storePath); 
for (auto & i : drv.outputsAndOptPaths(*store)) { if (!i.second.second) - throw UsageError("Cannot use output path of floating content-addressed derivation until we know what it is (e.g. by building it)"); + throw UsageError("Cannot use output path of floating content-addressing derivation until we know what it is (e.g. by building it)"); outputs.insert(*i.second.second); } return outputs; diff --git a/src/nix/build.cc b/src/nix/build.cc index 3569b0cde..4ba6241ec 100644 --- a/src/nix/build.cc +++ b/src/nix/build.cc @@ -3,7 +3,6 @@ #include "shared.hh" #include "store-api.hh" #include "local-fs-store.hh" -#include "progress-bar.hh" #include @@ -120,7 +119,7 @@ struct CmdBuild : InstallablesCommand, MixDryRun, MixJSON, MixProfile createOutLinks(outLink, toBuiltPaths(buildables), *store2); if (printOutputPaths) { - stopProgressBar(); + logger->stop(); for (auto & buildable : buildables) { std::visit(overloaded { [&](const BuiltPath::Opaque & bo) { diff --git a/src/nix/cat.cc b/src/nix/cat.cc index e0179c348..214d256e9 100644 --- a/src/nix/cat.cc +++ b/src/nix/cat.cc @@ -1,7 +1,6 @@ #include "command.hh" #include "store-api.hh" #include "nar-accessor.hh" -#include "progress-bar.hh" using namespace nix; @@ -14,7 +13,7 @@ struct MixCat : virtual Args auto st = accessor->lstat(CanonPath(path)); if (st.type != SourceAccessor::Type::tRegular) throw Error("path '%1%' is not a regular file", path); - stopProgressBar(); + logger->stop(); writeFull(getStandardOutput(), accessor->readFile(CanonPath(path))); } diff --git a/src/nix/crash-handler.cc b/src/nix/crash-handler.cc new file mode 100644 index 000000000..8ffd436ac --- /dev/null +++ b/src/nix/crash-handler.cc @@ -0,0 +1,67 @@ +#include "crash-handler.hh" +#include "fmt.hh" +#include "logging.hh" + +#include +#include +#include + +// Darwin and FreeBSD stdenv do not define _GNU_SOURCE but do have _Unwind_Backtrace. 
+#if __APPLE__ || __FreeBSD__ +# define BOOST_STACKTRACE_GNU_SOURCE_NOT_REQUIRED +#endif + +#include + +#ifndef _WIN32 +# include +#endif + +namespace nix { + +namespace { + +void logFatal(std::string const & s) +{ + writeToStderr(s + "\n"); + // std::string for guaranteed null termination +#ifndef _WIN32 + syslog(LOG_CRIT, "%s", s.c_str()); +#endif +} + +void onTerminate() +{ + logFatal( + "Nix crashed. This is a bug. Please report this at https://github.com/NixOS/nix/issues with the following information included:\n"); + try { + std::exception_ptr eptr = std::current_exception(); + if (eptr) { + std::rethrow_exception(eptr); + } else { + logFatal("std::terminate() called without exception"); + } + } catch (const std::exception & ex) { + logFatal(fmt("Exception: %s: %s", boost::core::demangle(typeid(ex).name()), ex.what())); + } catch (...) { + logFatal("Unknown exception!"); + } + + logFatal("Stack trace:"); + std::stringstream ss; + ss << boost::stacktrace::stacktrace(); + logFatal(ss.str()); + + std::abort(); +} +} + +void registerCrashHandler() +{ + // DO NOT use this for signals. Boost stacktrace is very much not + // async-signal-safe, and in a world with ASLR, addr2line is pointless. + // + // If you want signals, set up a minidump system and do it out-of-process. + std::set_terminate(onTerminate); +} +} diff --git a/src/nix/crash-handler.hh b/src/nix/crash-handler.hh new file mode 100644 index 000000000..018e86747 --- /dev/null +++ b/src/nix/crash-handler.hh @@ -0,0 +1,11 @@ +#pragma once +/// @file Crash handler for Nix that prints back traces (hopefully in instances where it is not just going to crash the +/// process itself). + +namespace nix { + +/** Registers the Nix crash handler for std::terminate (currently; will support more crashes later). See also + * detectStackOverflow(). 
*/ +void registerCrashHandler(); + +} diff --git a/src/nix/derivation-show.cc b/src/nix/derivation-show.cc index bf637246d..5a07f58e6 100644 --- a/src/nix/derivation-show.cc +++ b/src/nix/derivation-show.cc @@ -1,5 +1,5 @@ -// FIXME: integrate this with nix path-info? -// FIXME: rename to 'nix store derivation show' or 'nix debug derivation show'? +// FIXME: integrate this with `nix path-info`? +// FIXME: rename to 'nix store derivation show'? #include "command.hh" #include "common-args.hh" diff --git a/src/nix/develop.cc b/src/nix/develop.cc index deee89aa1..961962ebd 100644 --- a/src/nix/develop.cc +++ b/src/nix/develop.cc @@ -7,7 +7,6 @@ #include "store-api.hh" #include "outputs-spec.hh" #include "derivations.hh" -#include "progress-bar.hh" #ifndef _WIN32 // TODO re-enable on Windows # include "run.hh" @@ -731,7 +730,7 @@ struct CmdPrintDevEnv : Common, MixJSON { auto buildEnvironment = getBuildEnvironment(store, installable).first; - stopProgressBar(); + logger->stop(); if (json) { logger->writeToStdout(buildEnvironment.toJSON()); diff --git a/src/nix/edit.cc b/src/nix/edit.cc index 9cbab230b..49807da9e 100644 --- a/src/nix/edit.cc +++ b/src/nix/edit.cc @@ -3,7 +3,6 @@ #include "shared.hh" #include "eval.hh" #include "attr-path.hh" -#include "progress-bar.hh" #include "editor-for.hh" #include @@ -40,7 +39,7 @@ struct CmdEdit : InstallableValueCommand } }(); - stopProgressBar(); + logger->stop(); auto args = editorFor(file, line); diff --git a/src/nix/eval.cc b/src/nix/eval.cc index 7811b77ed..e038d75c3 100644 --- a/src/nix/eval.cc +++ b/src/nix/eval.cc @@ -5,7 +5,6 @@ #include "eval.hh" #include "eval-inline.hh" #include "value-to-json.hh" -#include "progress-bar.hh" #include @@ -75,7 +74,7 @@ struct CmdEval : MixJSON, InstallableValueCommand, MixReadOnlyOption } if (writeTo) { - stopProgressBar(); + logger->stop(); if (fs::symlink_exists(*writeTo)) throw Error("path '%s' already exists", writeTo->string()); @@ -114,7 +113,7 @@ struct CmdEval : MixJSON, 
InstallableValueCommand, MixReadOnlyOption } else if (raw) { - stopProgressBar(); + logger->stop(); writeFull(getStandardOutput(), *state->coerceToString(noPos, *v, context, "while generating the eval command output")); } diff --git a/src/nix/flake-prefetch.md b/src/nix/flake-prefetch.md index a1cf0289a..4666aadc4 100644 --- a/src/nix/flake-prefetch.md +++ b/src/nix/flake-prefetch.md @@ -5,10 +5,14 @@ R""( * Download a tarball and unpack it: ```console - # nix flake prefetch https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.10.5.tar.xz + # nix flake prefetch https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.10.5.tar.xz --out-link ./result Downloaded 'https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.10.5.tar.xz?narHash=sha256-3XYHZANT6AFBV0BqegkAZHbba6oeDkIUCDwbATLMhAY=' to '/nix/store/sl5vvk8mb4ma1sjyy03kwpvkz50hd22d-source' (hash 'sha256-3XYHZANT6AFBV0BqegkAZHbba6oeDkIUCDwbATLMhAY='). + + # cat ./result/README + Linux kernel + … ``` * Download the `dwarffs` flake (looked up in the flake registry): diff --git a/src/nix/flake.cc b/src/nix/flake.cc index d7a0369f6..cbd412547 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -17,6 +17,8 @@ #include "eval-cache.hh" #include "markdown.hh" #include "users.hh" +#include "fetch-to-store.hh" +#include "local-fs-store.hh" #include #include @@ -95,20 +97,20 @@ public: .optional=true, .handler={[&](std::vector inputsToUpdate){ for (const auto & inputToUpdate : inputsToUpdate) { - InputPath inputPath; + InputAttrPath inputAttrPath; try { - inputPath = flake::parseInputPath(inputToUpdate); + inputAttrPath = flake::parseInputAttrPath(inputToUpdate); } catch (Error & e) { warn("Invalid flake input '%s'. To update a specific flake, use 'nix flake update --flake %s' instead.", inputToUpdate, inputToUpdate); throw e; } - if (lockFlags.inputUpdates.contains(inputPath)) - warn("Input '%s' was specified multiple times. 
You may have done this by accident."); - lockFlags.inputUpdates.insert(inputPath); + if (lockFlags.inputUpdates.contains(inputAttrPath)) + warn("Input '%s' was specified multiple times. You may have done this by accident.", printInputAttrPath(inputAttrPath)); + lockFlags.inputUpdates.insert(inputAttrPath); } }}, .completer = {[&](AddCompletions & completions, size_t, std::string_view prefix) { - completeFlakeInputPath(completions, getEvalState(), getFlakeRefsForCompletion(), prefix); + completeFlakeInputAttrPath(completions, getEvalState(), getFlakeRefsForCompletion(), prefix); }} }); @@ -214,7 +216,7 @@ struct CmdFlakeMetadata : FlakeCommand, MixJSON auto & flake = lockedFlake.flake; // Currently, all flakes are in the Nix store via the rootFS accessor. - auto storePath = store->printStorePath(sourcePathToStorePath(store, flake.path).first); + auto storePath = store->printStorePath(store->toStorePath(flake.path.path.abs()).first); if (json) { nlohmann::json j; @@ -304,7 +306,7 @@ struct CmdFlakeMetadata : FlakeCommand, MixJSON } else if (auto follows = std::get_if<1>(&input.second)) { logger->cout("%s" ANSI_BOLD "%s" ANSI_NORMAL " follows input '%s'", prefix + (last ? 
treeLast : treeConn), input.first, - printInputPath(*follows)); + printInputAttrPath(*follows)); } } }; @@ -1077,7 +1079,7 @@ struct CmdFlakeArchive : FlakeCommand, MixJSON, MixDryRun StorePathSet sources; - auto storePath = sourcePathToStorePath(store, flake.flake.path).first; + auto storePath = store->toStorePath(flake.flake.path.path.abs()).first; sources.insert(storePath); @@ -1431,8 +1433,18 @@ struct CmdFlakeShow : FlakeCommand, MixJSON struct CmdFlakePrefetch : FlakeCommand, MixJSON { + std::optional outLink; + CmdFlakePrefetch() { + addFlag({ + .longName = "out-link", + .shortName = 'o', + .description = "Create symlink named *path* to the resulting store path.", + .labels = {"path"}, + .handler = {&outLink}, + .completer = completePath + }); } std::string description() override @@ -1451,7 +1463,8 @@ struct CmdFlakePrefetch : FlakeCommand, MixJSON { auto originalRef = getFlakeRef(); auto resolvedRef = originalRef.resolve(store); - auto [storePath, lockedRef] = resolvedRef.fetchTree(store); + auto [accessor, lockedRef] = resolvedRef.lazyFetch(store); + auto storePath = fetchToStore(*store, accessor, FetchMode::Copy, lockedRef.input.getName()); auto hash = store->queryPathInfo(storePath)->narHash; if (json) { @@ -1467,6 +1480,13 @@ struct CmdFlakePrefetch : FlakeCommand, MixJSON store->printStorePath(storePath), hash.to_string(HashFormat::SRI, true)); } + + if (outLink) { + if (auto store2 = store.dynamic_pointer_cast()) + createOutLinks(*outLink, {BuiltPath::Opaque{storePath}}, *store2); + else + throw Error("'--out-link' is not supported for this Nix store"); + } } }; diff --git a/src/nix/log.cc b/src/nix/log.cc index 1a6f48f5e..2c35ed803 100644 --- a/src/nix/log.cc +++ b/src/nix/log.cc @@ -3,7 +3,6 @@ #include "shared.hh" #include "store-api.hh" #include "log-store.hh" -#include "progress-bar.hh" using namespace nix; @@ -55,7 +54,7 @@ struct CmdLog : InstallableCommand auto log = logSub.getBuildLog(path); if (!log) continue; - stopProgressBar(); + 
logger->stop(); printInfo("got build log for '%s' from '%s'", installable->what(), logSub.getUri()); writeFull(getStandardOutput(), *log); return; diff --git a/src/nix/main.cc b/src/nix/main.cc index f8f9d03a4..13438b31a 100644 --- a/src/nix/main.cc +++ b/src/nix/main.cc @@ -20,6 +20,7 @@ #include "flake/flake.hh" #include "self-exe.hh" #include "json-utils.hh" +#include "crash-handler.hh" #include #include @@ -351,6 +352,8 @@ void mainWrapped(int argc, char * * argv) { savedArgv = argv; + registerCrashHandler(); + /* The chroot helper needs to be run before any threads have been started. */ #ifndef _WIN32 @@ -385,8 +388,6 @@ void mainWrapped(int argc, char * * argv) } #endif - Finally f([] { logger->stop(); }); - programPath = argv[0]; auto programName = std::string(baseNameOf(programPath)); auto extensionPos = programName.find_last_of("."); @@ -555,10 +556,11 @@ int main(int argc, char * * argv) { // The CLI has a more detailed version than the libraries; see nixVersion. nix::nixVersion = NIX_CLI_VERSION; - +#ifndef _WIN32 // Increase the default stack size for the evaluator and for // libstdc++'s std::regex. nix::setStackSize(64 * 1024 * 1024); +#endif return nix::handleExceptions(argv[0], [&]() { nix::mainWrapped(argc, argv); diff --git a/src/nix/meson.build b/src/nix/meson.build index 1ad3d5b5a..79ad840f6 100644 --- a/src/nix/meson.build +++ b/src/nix/meson.build @@ -77,6 +77,7 @@ nix_sources = [config_h] + files( 'config-check.cc', 'config.cc', 'copy.cc', + 'crash-handler.cc', 'derivation-add.cc', 'derivation-show.cc', 'derivation.cc', @@ -238,7 +239,7 @@ foreach linkname : nix_symlinks # The 'runtime' tag is what executables default to, which we want to emulate here. 
install_tag : 'runtime' ) - t = custom_target( + custom_target( command: ['ln', '-sf', fs.name(this_exe), '@OUTPUT@'], output: linkname + executable_suffix, # native doesn't allow dangling symlinks, but the target executable often doesn't exist at this time diff --git a/src/nix/prefetch.cc b/src/nix/prefetch.cc index 84c0224e2..ba2fd39d8 100644 --- a/src/nix/prefetch.cc +++ b/src/nix/prefetch.cc @@ -4,7 +4,7 @@ #include "store-api.hh" #include "filetransfer.hh" #include "finally.hh" -#include "progress-bar.hh" +#include "loggers.hh" #include "tarfile.hh" #include "attr-path.hh" #include "eval-inline.hh" @@ -190,10 +190,7 @@ static int main_nix_prefetch_url(int argc, char * * argv) if (args.size() > 2) throw UsageError("too many arguments"); - Finally f([]() { stopProgressBar(); }); - - if (isTTY()) - startProgressBar(); + setLogFormat("bar"); auto store = openStore(); auto state = std::make_unique(myArgs.lookupPath, store, fetchSettings, evalSettings); @@ -247,7 +244,7 @@ static int main_nix_prefetch_url(int argc, char * * argv) auto [storePath, hash] = prefetchFile( store, resolveMirrorUrl(*state, url), name, ha, expectedHash, unpack, executable); - stopProgressBar(); + logger->stop(); if (!printPath) printInfo("path is '%s'", store->printStorePath(storePath)); diff --git a/src/nix/run.cc b/src/nix/run.cc index a9f9ef60f..897824d68 100644 --- a/src/nix/run.cc +++ b/src/nix/run.cc @@ -9,7 +9,6 @@ #include "local-fs-store.hh" #include "finally.hh" #include "source-accessor.hh" -#include "progress-bar.hh" #include "eval.hh" #include @@ -34,7 +33,7 @@ void execProgramInStore(ref store, const Strings & args, std::optional system) { - stopProgressBar(); + logger->stop(); restoreProcessContext(); diff --git a/src/nix/search.md b/src/nix/search.md index f65ac9b17..d355a7764 100644 --- a/src/nix/search.md +++ b/src/nix/search.md @@ -62,8 +62,8 @@ R""( # Description -`nix search` searches [*installable*](./nix.md#installables) (which can be evaluated, that is, a -flake or 
Nix expression, but not a store path or store derivation path) for packages whose name or description matches all of the +`nix search` searches [*installable*](./nix.md#installables) (that can be evaluated, that is, a +flake or Nix expression, but not a [store path] or [deriving path]) for packages whose name or description matches all of the regular expressions *regex*. For each matching package, It prints the full attribute name (from the root of the [installable](./nix.md#installables)), the version and the `meta.description` field, highlighting the substrings that @@ -75,6 +75,9 @@ it avoids highlighting the entire name and description of every package. > Note that in this context, `^` is the regex character to match the beginning of a string, *not* the delimiter for > [selecting a derivation output](@docroot@/command-ref/new-cli/nix.md#derivation-output-selection). +[store path]: @docroot@/glossary.md#gloss-store-path + +[deriving path]: @docroot@/glossary.md#gloss-deriving-path + # Flake output attributes If no flake output attribute is given, `nix search` searches for diff --git a/src/nix/sigs.cc b/src/nix/sigs.cc index 134d4f34a..10b99b452 100644 --- a/src/nix/sigs.cc +++ b/src/nix/sigs.cc @@ -3,7 +3,6 @@ #include "shared.hh" #include "store-api.hh" #include "thread-pool.hh" -#include "progress-bar.hh" #include @@ -175,7 +174,7 @@ struct CmdKeyGenerateSecret : Command if (!keyName) throw UsageError("required argument '--key-name' is missing"); - stopProgressBar(); + logger->stop(); writeFull(getStandardOutput(), SecretKey::generate(*keyName).to_string()); } }; @@ -197,7 +196,7 @@ struct CmdKeyConvertSecretToPublic : Command void run() override { SecretKey secretKey(drainFD(STDIN_FILENO)); - stopProgressBar(); + logger->stop(); writeFull(getStandardOutput(), secretKey.toPublicKey().to_string()); } }; diff --git a/src/nix/upgrade-nix.cc b/src/nix/upgrade-nix.cc index 1e8032af6..398e533ce 100644 --- a/src/nix/upgrade-nix.cc +++ b/src/nix/upgrade-nix.cc @@ -7,7 
+7,6 @@ #include "eval-settings.hh" #include "attr-path.hh" #include "names.hh" -#include "progress-bar.hh" #include "executable-path.hh" #include "self-exe.hh" @@ -71,7 +70,7 @@ struct CmdUpgradeNix : MixDryRun, StoreCommand auto version = DrvName(storePath.name()).version; if (dryRun) { - stopProgressBar(); + logger->stop(); warn("would upgrade to version %s", version); return; } @@ -89,7 +88,7 @@ struct CmdUpgradeNix : MixDryRun, StoreCommand throw Error("could not verify that '%s' works", program); } - stopProgressBar(); + logger->stop(); { Activity act(*logger, lvlInfo, actUnknown, diff --git a/src/nix/why-depends.cc b/src/nix/why-depends.cc index e299585ff..ae5c45ae3 100644 --- a/src/nix/why-depends.cc +++ b/src/nix/why-depends.cc @@ -1,6 +1,5 @@ #include "command.hh" #include "store-api.hh" -#include "progress-bar.hh" #include "source-accessor.hh" #include "shared.hh" @@ -110,8 +109,6 @@ struct CmdWhyDepends : SourceExprCommand, MixOperateOnOptions auto dependencyPath = *optDependencyPath; auto dependencyPathHash = dependencyPath.hashPart(); - stopProgressBar(); // FIXME - auto accessor = store->getFSAccessor(); auto const inf = std::numeric_limits::max(); diff --git a/src/perl/meson.build b/src/perl/meson.build index 7b3716c17..599e91710 100644 --- a/src/perl/meson.build +++ b/src/perl/meson.build @@ -57,10 +57,10 @@ libdir = join_paths(prefix, get_option('libdir')) # Required Programs #------------------------------------------------- -xz = find_program('xz') +find_program('xz') xsubpp = find_program('xsubpp') perl = find_program('perl') -curl = find_program('curl') +find_program('curl') yath = find_program('yath', required : false) # Required Libraries @@ -157,7 +157,7 @@ subdir(lib_dir) if get_option('tests').enabled() yath_rc_conf = configuration_data() yath_rc_conf.set('lib_dir', lib_dir) - yath_rc = configure_file( + configure_file( output : '.yath.rc', input : '.yath.rc.in', configuration : yath_rc_conf, diff --git 
a/tests/functional/ca/content-addressed.nix b/tests/functional/ca/content-addressed.nix index 6ed9c185b..e15208491 100644 --- a/tests/functional/ca/content-addressed.nix +++ b/tests/functional/ca/content-addressed.nix @@ -16,7 +16,7 @@ in { seed ? 0, }: -# A simple content-addressed derivation. +# A simple content-addressing derivation. # The derivation can be arbitrarily modified by passing a different `seed`, # but the output will always be the same rec { diff --git a/tests/functional/ca/derivation-json.sh b/tests/functional/ca/derivation-json.sh index 1cf9ffd37..2103707a2 100644 --- a/tests/functional/ca/derivation-json.sh +++ b/tests/functional/ca/derivation-json.sh @@ -12,7 +12,7 @@ drvPath2=$(nix derivation add < "$TEST_HOME"/simple.json) [[ "$drvPath" = "$drvPath2" ]] -# Content-addressed derivations can be renamed. +# Content-addressing derivations can be renamed. jq '.name = "foo"' < "$TEST_HOME"/simple.json > "$TEST_HOME"/foo.json drvPath3=$(nix derivation add --dry-run < "$TEST_HOME"/foo.json) # With --dry-run nothing is actually written diff --git a/tests/functional/characterisation/framework.sh b/tests/functional/characterisation/framework.sh index 5ca125ab5..d2c2155db 100644 --- a/tests/functional/characterisation/framework.sh +++ b/tests/functional/characterisation/framework.sh @@ -1,5 +1,7 @@ # shellcheck shell=bash +badTestNames=() + # Golden test support # # Test that the output of the given test matches what is expected. If @@ -18,10 +20,11 @@ function diffAndAcceptInner() { fi # Diff so we get a nice message - if ! diff --color=always --unified "$expectedOrEmpty" "$got"; then - echo "FAIL: evaluation result of $testName not as expected" + if ! diff >&2 --color=always --unified "$expectedOrEmpty" "$got"; then + echo >&2 "FAIL: evaluation result of $testName not as expected" # shellcheck disable=SC2034 badDiff=1 + badTestNames+=("$testName") fi # Update expected if `_NIX_TEST_ACCEPT` is non-empty. 
@@ -42,14 +45,14 @@ function characterisationTestExit() { if test -n "${_NIX_TEST_ACCEPT-}"; then if (( "$badDiff" )); then set +x - echo 'Output did mot match, but accepted output as the persisted expected output.' - echo 'That means the next time the tests are run, they should pass.' + echo >&2 'Output did not match, but accepted output as the persisted expected output.' + echo >&2 'That means the next time the tests are run, they should pass.' set -x else set +x - echo 'NOTE: Environment variable _NIX_TEST_ACCEPT is defined,' - echo 'indicating the unexpected output should be accepted as the expected output going forward,' - echo 'but no tests had unexpected output so there was no expected output to update.' + echo >&2 'NOTE: Environment variable _NIX_TEST_ACCEPT is defined,' + echo >&2 'indicating the unexpected output should be accepted as the expected output going forward,' + echo >&2 'but no tests had unexpected output so there was no expected output to update.' set -x fi if (( "$badExitCode" )); then @@ -60,16 +63,21 @@ function characterisationTestExit() { else if (( "$badDiff" )); then set +x - echo '' - echo 'You can rerun this test with:' - echo '' - echo " _NIX_TEST_ACCEPT=1 make tests/functional/${TEST_NAME}.sh.test" - echo '' - echo 'to regenerate the files containing the expected output,' - echo 'and then view the git diff to decide whether a change is' - echo 'good/intentional or bad/unintentional.' - echo 'If the diff contains arbitrary or impure information,' - echo 'please improve the normalization that the test applies to the output.' 
+ echo >&2 '' + echo >&2 'The following tests had unexpected output:' + for testName in "${badTestNames[@]}"; do + echo >&2 " $testName" + done + echo >&2 '' + echo >&2 'You can rerun this test with:' + echo >&2 '' + echo >&2 " _NIX_TEST_ACCEPT=1 meson test ${TEST_NAME}" + echo >&2 '' + echo >&2 'to regenerate the files containing the expected output,' + echo >&2 'and then view the git diff to decide whether a change is' + echo >&2 'good/intentional or bad/unintentional.' + echo >&2 'If the diff contains arbitrary or impure information,' + echo >&2 'please improve the normalization that the test applies to the output.' set -x fi exit $(( "$badExitCode" + "$badDiff" )) diff --git a/tests/functional/chroot-store.sh b/tests/functional/chroot-store.sh index ccde3e90b..7300f04ba 100755 --- a/tests/functional/chroot-store.sh +++ b/tests/functional/chroot-store.sh @@ -2,6 +2,28 @@ source common.sh +# Regression test for #11503. +mkdir -p "$TEST_ROOT/directory" +cat > "$TEST_ROOT/directory/default.nix" < "$TEST_ROOT"/example.txt mkdir -p "$TEST_ROOT/x" diff --git a/tests/functional/dyn-drv/dep-built-drv-2.sh b/tests/functional/dyn-drv/dep-built-drv-2.sh new file mode 100644 index 000000000..3247720af --- /dev/null +++ b/tests/functional/dyn-drv/dep-built-drv-2.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash + +source common.sh + +# Store layer needs bugfix +requireDaemonNewerThan "2.27pre20250205" + +TODO_NixOS # can't enable a sandbox feature easily + +enableFeatures 'recursive-nix' +restartDaemon + +NIX_BIN_DIR="$(dirname "$(type -p nix)")" +export NIX_BIN_DIR + +expectStderr 1 nix build -L --file ./non-trivial.nix --no-link | grepQuiet "Building dynamic derivations in one shot is not yet implemented" diff --git a/tests/functional/dyn-drv/failing-outer.sh b/tests/functional/dyn-drv/failing-outer.sh new file mode 100644 index 000000000..fbad70701 --- /dev/null +++ b/tests/functional/dyn-drv/failing-outer.sh @@ -0,0 +1,14 @@ +#!/usr/bin/env bash + +source common.sh + +# Store 
layer needs bugfix +requireDaemonNewerThan "2.27pre20250205" + +skipTest "dyn drv input scheduling had to be reverted for 2.27" + +expected=100 +if [[ -v NIX_DAEMON_PACKAGE ]]; then expected=1; fi # work around the daemon not returning a 100 status correctly + +expectStderr "$expected" nix-build ./text-hashed-output.nix -A failingWrapper --no-out-link \ + | grepQuiet "build of '.*use-dynamic-drv-in-non-dynamic-drv-wrong.drv' failed" diff --git a/tests/functional/dyn-drv/meson.build b/tests/functional/dyn-drv/meson.build index 5b60a4698..07145000d 100644 --- a/tests/functional/dyn-drv/meson.build +++ b/tests/functional/dyn-drv/meson.build @@ -12,8 +12,10 @@ suites += { 'recursive-mod-json.sh', 'build-built-drv.sh', 'eval-outputOf.sh', + 'failing-outer.sh', 'dep-built-drv.sh', 'old-daemon-error-hack.sh', + 'dep-built-drv-2.sh', ], 'workdir': meson.current_source_dir(), } diff --git a/tests/functional/dyn-drv/non-trivial.nix b/tests/functional/dyn-drv/non-trivial.nix new file mode 100644 index 000000000..5cfafbb62 --- /dev/null +++ b/tests/functional/dyn-drv/non-trivial.nix @@ -0,0 +1,77 @@ +with import ./config.nix; + +builtins.outputOf + (mkDerivation { + name = "make-derivations.drv"; + + requiredSystemFeatures = [ "recursive-nix" ]; + + buildCommand = '' + set -e + set -u + + PATH=${builtins.getEnv "NIX_BIN_DIR"}:$PATH + + export NIX_CONFIG='extra-experimental-features = nix-command ca-derivations dynamic-derivations' + + declare -A deps=( + [a]="" + [b]="a" + [c]="a" + [d]="b c" + [e]="b c d" + ) + + # Cannot just literally include this, or Nix will think it is the + # *outer* derivation that's trying to refer to itself, and + # substitute the string too soon. 
+ placeholder=$(nix eval --raw --expr 'builtins.placeholder "out"') + + declare -A drvs=() + for word in a b c d e; do + inputDrvs="" + for dep in ''${deps[$word]}; do + if [[ "$inputDrvs" != "" ]]; then + inputDrvs+="," + fi + read -r -d "" line <> \"\$out\""], + "builder": "${shell}", + "env": { + "out": "$placeholder", + "$word": "hello, from $word!", + "PATH": ${builtins.toJSON path} + }, + "inputDrvs": { + $inputDrvs + }, + "inputSrcs": [], + "name": "build-$word", + "outputs": { + "out": { + "method": "nar", + "hashAlgo": "sha256" + } + }, + "system": "${system}" + } + EOF + drvs[$word]="$(echo "$json" | nix derivation add)" + done + cp "''${drvs[e]}" $out + ''; + + __contentAddressed = true; + outputHashMode = "text"; + outputHashAlgo = "sha256"; + }).outPath + "out" diff --git a/tests/functional/dyn-drv/old-daemon-error-hack.nix b/tests/functional/dyn-drv/old-daemon-error-hack.nix index c9d4a62d4..d5da3b3ab 100644 --- a/tests/functional/dyn-drv/old-daemon-error-hack.nix +++ b/tests/functional/dyn-drv/old-daemon-error-hack.nix @@ -1,6 +1,6 @@ with import ./config.nix; -# A simple content-addressed derivation. +# A simple content-addressing derivation. # The derivation can be arbitrarily modified by passing a different `seed`, # but the output will always be the same rec { diff --git a/tests/functional/dyn-drv/text-hashed-output.nix b/tests/functional/dyn-drv/text-hashed-output.nix index 99203b518..59261bbbf 100644 --- a/tests/functional/dyn-drv/text-hashed-output.nix +++ b/tests/functional/dyn-drv/text-hashed-output.nix @@ -1,6 +1,6 @@ with import ./config.nix; -# A simple content-addressed derivation. +# A simple content-addressing derivation. 
# The derivation can be arbitrarily modified by passing a different `seed`, # but the output will always be the same rec { @@ -13,6 +13,7 @@ rec { echo "Hello World" > $out/hello ''; }; + producingDrv = mkDerivation { name = "hello.drv"; buildCommand = '' @@ -23,6 +24,7 @@ rec { outputHashMode = "text"; outputHashAlgo = "sha256"; }; + wrapper = mkDerivation { name = "use-dynamic-drv-in-non-dynamic-drv"; buildCommand = '' @@ -30,4 +32,12 @@ rec { cp -r ${builtins.outputOf producingDrv.outPath "out"} $out ''; }; + + failingWrapper = mkDerivation { + name = "use-dynamic-drv-in-non-dynamic-drv-wrong"; + buildCommand = '' + echo "Fail at copying the output of the dynamic derivation" + fail ${builtins.outputOf producingDrv.outPath "out"} $out + ''; + }; } diff --git a/tests/functional/fetchGit.sh b/tests/functional/fetchGit.sh index f3eda54dc..5e5e8e61f 100755 --- a/tests/functional/fetchGit.sh +++ b/tests/functional/fetchGit.sh @@ -65,7 +65,7 @@ git -C $repo add differentbranch git -C $repo commit -m 'Test2' git -C $repo checkout master devrev=$(git -C $repo rev-parse devtest) -nix eval --impure --raw --expr "builtins.fetchGit { url = file://$repo; rev = \"$devrev\"; }" +nix eval --raw --expr "builtins.fetchGit { url = file://$repo; rev = \"$devrev\"; }" [[ $(nix eval --raw --expr "builtins.readFile (builtins.fetchGit { url = file://$repo; rev = \"$devrev\"; allRefs = true; } + \"/differentbranch\")") = 'different file' ]] @@ -80,7 +80,7 @@ path2=$(nix eval --raw --expr "(builtins.fetchGit { url = file://$repo; rev = \" # In pure eval mode, fetchGit with a revision should succeed. [[ $(nix eval --raw --expr "builtins.readFile (fetchGit { url = file://$repo; rev = \"$rev2\"; } + \"/hello\")") = world ]] -# But without a hash, it fails +# But without a hash, it fails. expectStderr 1 nix eval --expr 'builtins.fetchGit "file:///foo"' | grepQuiet "'fetchGit' will not fetch unlocked input" # Fetch again. This should be cached. 
@@ -142,13 +142,17 @@ path4=$(nix eval --impure --refresh --raw --expr "(builtins.fetchGit file://$rep [[ $(nix eval --impure --expr "builtins.hasAttr \"dirtyRev\" (builtins.fetchGit $repo)") == "false" ]] [[ $(nix eval --impure --expr "builtins.hasAttr \"dirtyShortRev\" (builtins.fetchGit $repo)") == "false" ]] -status=0 -nix eval --impure --raw --expr "(builtins.fetchGit { url = $repo; rev = \"$rev2\"; narHash = \"sha256-B5yIPHhEm0eysJKEsO7nqxprh9vcblFxpJG11gXJus1=\"; }).outPath" || status=$? -[[ "$status" = "102" ]] +expect 102 nix eval --raw --expr "(builtins.fetchGit { url = $repo; rev = \"$rev2\"; narHash = \"sha256-B5yIPHhEm0eysJKEsO7nqxprh9vcblFxpJG11gXJus1=\"; }).outPath" -path5=$(nix eval --impure --raw --expr "(builtins.fetchGit { url = $repo; rev = \"$rev2\"; narHash = \"sha256-Hr8g6AqANb3xqX28eu1XnjK/3ab8Gv6TJSnkb1LezG9=\"; }).outPath") +path5=$(nix eval --raw --expr "(builtins.fetchGit { url = $repo; rev = \"$rev2\"; narHash = \"sha256-Hr8g6AqANb3xqX28eu1XnjK/3ab8Gv6TJSnkb1LezG9=\"; }).outPath") [[ $path = $path5 ]] +# Ensure that NAR hashes are checked. +expectStderr 102 nix eval --raw --expr "(builtins.fetchGit { url = $repo; rev = \"$rev2\"; narHash = \"sha256-Hr8g6AqANb4xqX28eu1XnjK/3ab8Gv6TJSnkb1LezG9=\"; }).outPath" | grepQuiet "error: NAR hash mismatch" + +# It's allowed to use only a narHash, but you should get a warning. 
+expectStderr 0 nix eval --raw --expr "(builtins.fetchGit { url = $repo; ref = \"tag2\"; narHash = \"sha256-Hr8g6AqANb3xqX28eu1XnjK/3ab8Gv6TJSnkb1LezG9=\"; }).outPath" | grepQuiet "warning: Input .* is unlocked" + # tarball-ttl should be ignored if we specify a rev echo delft > $repo/hello git -C $repo add hello @@ -256,7 +260,7 @@ echo "/exported-wonky export-ignore=wonk" >> $repo/.gitattributes git -C $repo add not-exported-file exported-wonky .gitattributes git -C $repo commit -m 'Bla6' rev5=$(git -C $repo rev-parse HEAD) -path12=$(nix eval --impure --raw --expr "(builtins.fetchGit { url = file://$repo; rev = \"$rev5\"; }).outPath") +path12=$(nix eval --raw --expr "(builtins.fetchGit { url = file://$repo; rev = \"$rev5\"; }).outPath") [[ ! -e $path12/not-exported-file ]] [[ -e $path12/exported-wonky ]] diff --git a/tests/functional/fixed.nix b/tests/functional/fixed.nix index 4097a6374..eab3ee707 100644 --- a/tests/functional/fixed.nix +++ b/tests/functional/fixed.nix @@ -72,4 +72,7 @@ rec { # Can use "nar" instead of "recursive" now. 
nar-not-recursive = f2 "foo" ./fixed.builder2.sh "nar" "md5" "3670af73070fa14077ad74e0f5ea4e42"; + + # Experimental feature + git = f2 "foo" ./fixed.builder2.sh "git" "sha1" "cd44baf36915d5dec8374232ea7e2057f3b4494e"; } diff --git a/tests/functional/flakes/debugger.sh b/tests/functional/flakes/debugger.sh new file mode 100644 index 000000000..f75f0ad39 --- /dev/null +++ b/tests/functional/flakes/debugger.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env bash + +source ./common.sh + +requireGit + +flakeDir="$TEST_ROOT/flake" +createGitRepo "$flakeDir" + +cat >"$flakeDir/flake.nix" <"$flake1Dir/flake.nix" < \$out ''; }; + stack-depth = + let + f = x: if x == 0 then true else f (x - 1); + in + assert (f 100); self.drv; ifd = assert (import self.drv); self.drv; }; } @@ -33,6 +38,12 @@ git -C "$flake1Dir" commit -m "Init" expect 1 nix build "$flake1Dir#foo.bar" 2>&1 | grepQuiet 'error: breaks' expect 1 nix build "$flake1Dir#foo.bar" 2>&1 | grepQuiet 'error: breaks' +# Stack overflow error must not be cached +expect 1 nix build --max-call-depth 50 "$flake1Dir#stack-depth" 2>&1 \ + | grepQuiet 'error: stack overflow; max-call-depth exceeded' +# If the SO is cached, the following invocation will produce a cached failure; we expect it to succeed +nix build --no-link "$flake1Dir#stack-depth" + # Conditional error should not be cached expect 1 nix build "$flake1Dir#ifd" --option allow-import-from-derivation false 2>&1 \ | grepQuiet 'error: cannot build .* during evaluation because the option '\''allow-import-from-derivation'\'' is disabled' diff --git a/tests/functional/flakes/flake-in-submodule.sh b/tests/functional/flakes/flake-in-submodule.sh index f98c19aa8..fe5acf26d 100755 --- a/tests/functional/flakes/flake-in-submodule.sh +++ b/tests/functional/flakes/flake-in-submodule.sh @@ -27,6 +27,7 @@ git config --global protocol.file.allow always rootRepo=$TEST_ROOT/rootRepo subRepo=$TEST_ROOT/submodule +otherRepo=$TEST_ROOT/otherRepo createGitRepo "$subRepo" @@ -74,9 +75,50 @@ EOF git -C 
"$rootRepo" add flake.nix git -C "$rootRepo" commit -m "Add flake.nix" -storePath=$(nix flake metadata --json "$rootRepo?submodules=1" | jq -r .path) +storePath=$(nix flake prefetch --json "$rootRepo?submodules=1" | jq -r .storePath) [[ -e "$storePath/submodule" ]] +# Test the use of inputs.self. +cat > "$rootRepo"/flake.nix < "$otherRepo"/flake.nix < "$flakeDir/flake.nix" < "$repoDir/subdir/flake.nix" < "$repoDir/file" + mkdir "$repoDir/subdir" + cat > "$repoDir/subdir/flake.nix" < "$repo2Dir/file" + git -C "$repo2Dir" add flake1_sym file + git -C "$repo2Dir" commit -m Initial + [[ $(nix eval "$repo2Dir/flake1_sym#x") == \"Hello\\n\" ]] + rm -rf "$TEST_ROOT/repo1" "$TEST_ROOT/repo2" +} +test_symlink_from_repo_to_another diff --git a/tests/functional/flakes/unlocked-override.sh b/tests/functional/flakes/unlocked-override.sh index dcb427a8f..512aca401 100755 --- a/tests/functional/flakes/unlocked-override.sh +++ b/tests/functional/flakes/unlocked-override.sh @@ -37,8 +37,8 @@ expectStderr 1 nix flake lock "$flake2Dir" --override-input flake1 "$TEST_ROOT/f nix flake lock "$flake2Dir" --override-input flake1 "$TEST_ROOT/flake1" --allow-dirty-locks -# Using a lock file with a dirty lock requires --allow-dirty-locks as well. -expectStderr 1 nix eval "$flake2Dir#x" | - grepQuiet "Lock file contains unlocked input" +# Using a lock file with a dirty lock does not require --allow-dirty-locks, but should print a warning. 
+expectStderr 0 nix eval "$flake2Dir#x" | + grepQuiet "warning: Lock file entry .* is unlocked" -[[ $(nix eval "$flake2Dir#x" --allow-dirty-locks) = 456 ]] +[[ $(nix eval "$flake2Dir#x") = 456 ]] diff --git a/tests/functional/git-hashing/fixed.sh b/tests/functional/git-hashing/fixed.sh new file mode 100755 index 000000000..f33d95cfa --- /dev/null +++ b/tests/functional/git-hashing/fixed.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +source common.sh + +# Store layer needs bugfix +requireDaemonNewerThan "2.27pre20250122" + +nix-build ../fixed.nix -A git --no-out-link diff --git a/tests/functional/git-hashing/meson.build b/tests/functional/git-hashing/meson.build index 470c53fc5..d6a782cdc 100644 --- a/tests/functional/git-hashing/meson.build +++ b/tests/functional/git-hashing/meson.build @@ -3,6 +3,7 @@ suites += { 'deps': [], 'tests': [ 'simple.sh', + 'fixed.sh', ], 'workdir': meson.current_source_dir(), } diff --git a/tests/functional/help.sh b/tests/functional/help.sh index 127cc455b..e1ef75c41 100755 --- a/tests/functional/help.sh +++ b/tests/functional/help.sh @@ -2,6 +2,31 @@ source common.sh +function subcommands() { + jq -r ' +def recurse($prefix): + to_entries[] | + ($prefix + [.key]) as $newPrefix | + (if .value | has("commands") then + ($newPrefix, (.value.commands | recurse($newPrefix))) + else + $newPrefix + end); +.args.commands | recurse([]) | join(" ") +' +} + +nix __dump-cli | subcommands | while IFS= read -r cmd; do + # shellcheck disable=SC2086 # word splitting of cmd is intended + nix $cmd --help +done + +[[ $(type -p man) ]] || skipTest "'man' not installed" + +# FIXME: we don't know whether we built the manpages, so we can't +# reliably test them here. +skipTest "we don't know whether we built the manpages, so we can't reliably test them here." 
+ # test help output nix-build --help @@ -49,22 +74,3 @@ nix-daemon --help nix-hash --help nix-instantiate --help nix-prefetch-url --help - -function subcommands() { - jq -r ' -def recurse($prefix): - to_entries[] | - ($prefix + [.key]) as $newPrefix | - (if .value | has("commands") then - ($newPrefix, (.value.commands | recurse($newPrefix))) - else - $newPrefix - end); -.args.commands | recurse([]) | join(" ") -' -} - -nix __dump-cli | subcommands | while IFS= read -r cmd; do - # shellcheck disable=SC2086 # word splitting of cmd is intended - nix $cmd --help -done diff --git a/tests/functional/lang/eval-fail-fetchTree-negative.err.exp b/tests/functional/lang/eval-fail-fetchTree-negative.err.exp index d9ba1f0b2..423123ca0 100644 --- a/tests/functional/lang/eval-fail-fetchTree-negative.err.exp +++ b/tests/functional/lang/eval-fail-fetchTree-negative.err.exp @@ -5,4 +5,4 @@ error: | ^ 2| type = "file"; - error: negative value given for fetchTree attr owner: -1 + error: negative value given for 'fetchTree' argument 'owner': -1 diff --git a/tests/functional/meson.build b/tests/functional/meson.build index 03a07bc54..af95879fb 100644 --- a/tests/functional/meson.build +++ b/tests/functional/meson.build @@ -37,7 +37,7 @@ test_confdata = { # Done as a subdir() so Meson places it under `common` in the build directory as well. subdir('common') -config_nix_in = configure_file( +configure_file( input : 'config.nix.in', output : 'config.nix', configuration : test_confdata, @@ -243,8 +243,6 @@ foreach suite : suites # Used for target dependency/ordering tracking, not adding compiler flags or anything. 
depends : suite['deps'], workdir : workdir, - # Won't pass until man pages are generated - should_fail : suite['name'] == 'main' and script == 'help.sh' ) endforeach endforeach diff --git a/tests/functional/nix-channel.sh b/tests/functional/nix-channel.sh index 16d6a1355..d0b772850 100755 --- a/tests/functional/nix-channel.sh +++ b/tests/functional/nix-channel.sh @@ -68,4 +68,14 @@ nix-env -i dependencies-top [ -e $TEST_HOME/.nix-profile/foobar ] # Test evaluation through a channel symlink (#9882). -nix-instantiate '' +drvPath=$(nix-instantiate '') + +# Add a test for the special case behaviour of 'nixpkgs' in the +# channels for root (see EvalSettings::getDefaultNixPath()). +if ! isTestOnNixOS; then + nix-channel --add file://$TEST_ROOT/foo nixpkgs + nix-channel --update + mv $TEST_HOME/.local/state/nix/profiles $TEST_ROOT/var/nix/profiles/per-user/root + drvPath2=$(nix-instantiate '') + [[ "$drvPath" = "$drvPath2" ]] +fi diff --git a/tests/functional/tarball.sh b/tests/functional/tarball.sh index 720b3688f..53807603c 100755 --- a/tests/functional/tarball.sh +++ b/tests/functional/tarball.sh @@ -73,13 +73,13 @@ test_tarball .gz gzip # All entries in tree.tar.gz refer to the same file, and all have the same inode when unpacked by GNU tar. # We don't preserve the hard links, because that's an optimization we think is not worth the complexity, # so we only make sure that the contents are copied correctly. 
-path="$(nix flake prefetch --json "tarball+file://$(pwd)/tree.tar.gz" | jq -r .storePath)" -[[ $(cat "$path/a/b/foo") = bar ]] -[[ $(cat "$path/a/b/xyzzy") = bar ]] -[[ $(cat "$path/a/yyy") = bar ]] -[[ $(cat "$path/a/zzz") = bar ]] -[[ $(cat "$path/c/aap") = bar ]] -[[ $(cat "$path/fnord") = bar ]] +nix flake prefetch --json "tarball+file://$(pwd)/tree.tar.gz" --out-link "$TEST_ROOT/result" +[[ $(cat "$TEST_ROOT/result/a/b/foo") = bar ]] +[[ $(cat "$TEST_ROOT/result/a/b/xyzzy") = bar ]] +[[ $(cat "$TEST_ROOT/result/a/yyy") = bar ]] +[[ $(cat "$TEST_ROOT/result/a/zzz") = bar ]] +[[ $(cat "$TEST_ROOT/result/c/aap") = bar ]] +[[ $(cat "$TEST_ROOT/result/fnord") = bar ]] # Test a tarball that has multiple top-level directories. rm -rf "$TEST_ROOT/tar_root" diff --git a/tests/nixos/default.nix b/tests/nixos/default.nix index ca72034ec..92f89d8db 100644 --- a/tests/nixos/default.nix +++ b/tests/nixos/default.nix @@ -2,7 +2,7 @@ lib, nixpkgs, nixpkgsFor, - self, + nixpkgs-23-11, }: let @@ -85,7 +85,7 @@ let { imports = [ checkOverrideNixVersion ]; nix.package = lib.mkForce ( - self.inputs.nixpkgs-23-11.legacyPackages.${pkgs.stdenv.hostPlatform.system}.nixVersions.nix_2_13.overrideAttrs + nixpkgs-23-11.legacyPackages.${pkgs.stdenv.hostPlatform.system}.nixVersions.nix_2_13.overrideAttrs (o: { meta = o.meta // { knownVulnerabilities = [ ]; diff --git a/tests/nixos/fetch-git/test-cases/lfs/default.nix b/tests/nixos/fetch-git/test-cases/lfs/default.nix new file mode 100644 index 000000000..686796fcc --- /dev/null +++ b/tests/nixos/fetch-git/test-cases/lfs/default.nix @@ -0,0 +1,228 @@ +{ + # mostly copied from https://github.com/NixOS/nix/blob/358c26fd13a902d9a4032a00e6683571be07a384/tests/nixos/fetch-git/test-cases/fetchTree-shallow/default.nix#L1 + # ty @DavHau + description = "fetchGit smudges LFS pointers if lfs=true"; + script = '' + from tempfile import TemporaryDirectory + + expected_max_size_lfs_pointer = 1024 # 1 KiB (values >= than this cannot be pointers, and test 
files are 1 MiB) + + # purge nix git cache to make sure we start with a clean slate + client.succeed("rm -rf ~/.cache/nix") + + + with subtest("Request lfs fetch without any .gitattributes file"): + client.succeed(f"dd if=/dev/urandom of={repo.path}/regular bs=1M count=1 >&2") + client.succeed(f"{repo.git} add : >&2") + client.succeed(f"{repo.git} commit -m 'no .gitattributes' >&2") + client.succeed(f"{repo.git} push origin main >&2") + + # memorize the revision + no_gitattributes_rev = client.succeed(f"{repo.git} rev-parse HEAD").strip() + + # fetch with lfs=true, and check that the lack of .gitattributes does not break anything + fetchGit_no_gitattributes_expr = f""" + builtins.fetchGit {{ + url = "{repo.remote}"; + rev = "{no_gitattributes_rev}"; + ref = "main"; + lfs = true; + }} + """ + fetched_no_gitattributes = client.succeed(f""" + nix eval --debug --impure --raw --expr '({fetchGit_no_gitattributes_expr}).outPath' + """) + client.succeed(f"cmp {repo.path}/regular {fetched_no_gitattributes}/regular >&2") + + + with subtest("Add a file that should be tracked by lfs, but isn't"): + # (git lfs cli only throws a warning "Encountered 1 file that should have + # been a pointer, but wasn't") + + client.succeed(f"dd if=/dev/urandom of={repo.path}/black_sheep bs=1M count=1 >&2") + client.succeed(f"echo 'black_sheep filter=lfs -text' >>{repo.path}/.gitattributes") + client.succeed(f"{repo.git} add : >&2") + client.succeed(f"{repo.git} commit -m 'add misleading file' >&2") + client.succeed(f"{repo.git} push origin main >&2") + + # memorize the revision + bad_lfs_rev = client.succeed(f"{repo.git} rev-parse HEAD").strip() + + # test assumption that it can be cloned with regular git first + # (here we see the warning as stated above) + with TemporaryDirectory() as tempdir: + client.succeed(f"git clone -n {repo.remote} {tempdir} >&2") + client.succeed(f"git -C {tempdir} lfs install >&2") + client.succeed(f"git -C {tempdir} checkout {bad_lfs_rev} >&2") + + # check that the 
file is not a pointer, as expected + file_size_git = client.succeed(f"stat -c %s {tempdir}/black_sheep").strip() + assert int(file_size_git) == 1024 * 1024, \ + f"non lfs file is {file_size_git}b (!= 1MiB), probably a test implementation error" + + lfs_files = client.succeed(f"git -C {tempdir} lfs ls-files").strip() + assert lfs_files == "", "non lfs file is tracked by lfs, probably a test implementation error" + + client.succeed(f"cmp {repo.path}/black_sheep {tempdir}/black_sheep >&2") + + # now fetch without lfs, check that the file is not a pointer + fetchGit_bad_lfs_without_lfs_expr = f""" + builtins.fetchGit {{ + url = "{repo.remote}"; + rev = "{bad_lfs_rev}"; + ref = "main"; + lfs = false; + }} + """ + fetched_bad_lfs_without_lfs = client.succeed(f""" + nix eval --debug --impure --raw --expr '({fetchGit_bad_lfs_without_lfs_expr}).outPath' + """) + + # check that file was not somehow turned into a pointer + file_size_bad_lfs_without_lfs = client.succeed(f"stat -c %s {fetched_bad_lfs_without_lfs}/black_sheep").strip() + + assert int(file_size_bad_lfs_without_lfs) == 1024 * 1024, \ + f"non lfs-enrolled file is {file_size_bad_lfs_without_lfs}b (!= 1MiB), probably a test implementation error" + client.succeed(f"cmp {repo.path}/black_sheep {fetched_bad_lfs_without_lfs}/black_sheep >&2") + + # finally fetch with lfs=true, and check that the bad file does not break anything + fetchGit_bad_lfs_with_lfs_expr = f""" + builtins.fetchGit {{ + url = "{repo.remote}"; + rev = "{bad_lfs_rev}"; + ref = "main"; + lfs = true; + }} + """ + fetchGit_bad_lfs_with_lfs = client.succeed(f""" + nix eval --debug --impure --raw --expr '({fetchGit_bad_lfs_with_lfs_expr}).outPath' + """) + + client.succeed(f"cmp {repo.path}/black_sheep {fetchGit_bad_lfs_with_lfs}/black_sheep >&2") + + + with subtest("Add an lfs-enrolled file to the repo"): + client.succeed(f"dd if=/dev/urandom of={repo.path}/beeg bs=1M count=1 >&2") + client.succeed(f"{repo.git} lfs install >&2") + 
client.succeed(f"{repo.git} lfs track --filename beeg >&2") + client.succeed(f"{repo.git} add : >&2") + client.succeed(f"{repo.git} commit -m 'add lfs file' >&2") + client.succeed(f"{repo.git} push origin main >&2") + + # memorize the revision + lfs_file_rev = client.succeed(f"{repo.git} rev-parse HEAD").strip() + + # first fetch without lfs, check that we did not smudge the file + fetchGit_nolfs_expr = f""" + builtins.fetchGit {{ + url = "{repo.remote}"; + rev = "{lfs_file_rev}"; + ref = "main"; + lfs = false; + }} + """ + fetched_nolfs = client.succeed(f""" + nix eval --debug --impure --raw --expr '({fetchGit_nolfs_expr}).outPath' + """) + + # check that file was not smudged + file_size_nolfs = client.succeed(f"stat -c %s {fetched_nolfs}/beeg").strip() + + assert int(file_size_nolfs) < expected_max_size_lfs_pointer, \ + f"did not set lfs=true, yet lfs-enrolled file is {file_size_nolfs}b (>= 1KiB), probably smudged when we should not have" + + # now fetch with lfs=true and check that the file was smudged + fetchGit_lfs_expr = f""" + builtins.fetchGit {{ + url = "{repo.remote}"; + rev = "{lfs_file_rev}"; + ref = "main"; + lfs = true; + }} + """ + fetched_lfs = client.succeed(f""" + nix eval --debug --impure --raw --expr '({fetchGit_lfs_expr}).outPath' + """) + + assert fetched_lfs != fetched_nolfs, \ + f"fetching with and without lfs yielded the same store path {fetched_lfs}, fingerprinting error?" 
+
+        # check that file was smudged
+        file_size_lfs = client.succeed(f"stat -c %s {fetched_lfs}/beeg").strip()
+        assert int(file_size_lfs) == 1024 * 1024, \
+            f"set lfs=true, yet lfs-enrolled file is {file_size_lfs}b (!= 1MiB), probably did not smudge when we should have"
+
+
+    with subtest("Check that default is lfs=false"):
+        fetchGit_default_expr = f"""
+            builtins.fetchGit {{
+                url = "{repo.remote}";
+                rev = "{lfs_file_rev}";
+                ref = "main";
+            }}
+        """
+        fetched_default = client.succeed(f"""
+            nix eval --debug --impure --raw --expr '({fetchGit_default_expr}).outPath'
+        """)
+
+        # check that file was not smudged
+        file_size_default = client.succeed(f"stat -c %s {fetched_default}/beeg").strip()
+
+        assert int(file_size_default) < expected_max_size_lfs_pointer, \
+            f"did not set lfs, yet lfs-enrolled file is {file_size_default}b (>= 1KiB), probably bad default value"
+
+    with subtest("Use as flake input"):
+        # May seem redundant, but this has minor differences compared to raw
+        # fetchGit which caused failures before
+        with TemporaryDirectory() as tempdir:
+            client.succeed(f"mkdir -p {tempdir}")
+            client.succeed(f"""
+                printf '{{
+                    inputs = {{
+                        foo = {{
+                            url = "git+{repo.remote}?ref=main&rev={lfs_file_rev}&lfs=1";
+                            flake = false;
+                        }};
+                    }};
+                    outputs = {{ foo, self }}: {{ inherit (foo) outPath; }};
+                }}' >{tempdir}/flake.nix
+            """)
+            fetched_flake = client.succeed(f"""
+                nix eval --debug --raw {tempdir}#.outPath
+            """)
+
+            assert fetched_lfs == fetched_flake, \
+                f"fetching as flake input (store path {fetched_flake}) yielded a different result than using fetchGit (store path {fetched_lfs})"
+
+
+    with subtest("Check self.lfs"):
+        client.succeed(f"""
+            printf '{{
+                inputs.self.lfs = true;
+                outputs = {{ self }}: {{ }};
+            }}' >{repo.path}/flake.nix
+        """)
+        client.succeed(f"{repo.git} add : >&2")
+        client.succeed(f"{repo.git} commit -m 'add flake' >&2")
+        client.succeed(f"{repo.git} push origin main >&2")
+
+        # memorize the revision
+        self_lfs_rev = 
client.succeed(f"{repo.git} rev-parse HEAD").strip() + + with TemporaryDirectory() as tempdir: + client.succeed(f"mkdir -p {tempdir}") + client.succeed(f""" + printf '{{ + inputs.foo = {{ + url = "git+{repo.remote}?ref=main&rev={self_lfs_rev}"; + }}; + outputs = {{ foo, self }}: {{ inherit (foo) outPath; }}; + }}' >{tempdir}/flake.nix + """) + fetched_self_lfs = client.succeed(f""" + nix eval --debug --raw {tempdir}#.outPath + """) + + client.succeed(f"cmp {repo.path}/beeg {fetched_self_lfs}/beeg >&2") + ''; +} diff --git a/tests/nixos/fetch-git/testsupport/gitea.nix b/tests/nixos/fetch-git/testsupport/gitea.nix index 9409acff7..e63182639 100644 --- a/tests/nixos/fetch-git/testsupport/gitea.nix +++ b/tests/nixos/fetch-git/testsupport/gitea.nix @@ -29,9 +29,16 @@ in { pkgs, ... }: { services.gitea.enable = true; - services.gitea.settings.service.DISABLE_REGISTRATION = true; - services.gitea.settings.log.LEVEL = "Info"; - services.gitea.settings.database.LOG_SQL = false; + services.gitea.lfs.enable = true; + services.gitea.settings = { + service.DISABLE_REGISTRATION = true; + server = { + DOMAIN = "gitea"; + HTTP_PORT = 3000; + }; + log.LEVEL = "Info"; + database.LOG_SQL = false; + }; services.openssh.enable = true; networking.firewall.allowedTCPPorts = [ 3000 ]; environment.systemPackages = [ @@ -54,7 +61,10 @@ in client = { pkgs, ... }: { - environment.systemPackages = [ pkgs.git ]; + environment.systemPackages = [ + pkgs.git + pkgs.git-lfs + ]; }; }; defaults =