Commit bb421ac80b: Merge remote-tracking branch 'origin/master' into lazy-trees

108 changed files with 1242 additions and 452 deletions
.github/PULL_REQUEST_TEMPLATE.md (vendored, 4 changes)

@@ -21,8 +21,8 @@ Maintainers: tick if completed or explain if not relevant
 - [ ] tests, as appropriate
   - functional tests - `tests/**.sh`
   - unit tests - `src/*/tests`
-  - integration tests
+  - integration tests - `tests/nixos/*`
 - [ ] documentation in the manual
 - [ ] code and comments are self-explanatory
 - [ ] commit message explains why the change was made
-- [ ] new feature or bug fix: updated release notes
+- [ ] new feature or incompatible change: updated release notes
.github/workflows/backport.yml (vendored, 2 changes)

@@ -27,6 +27,6 @@ jobs:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          github_workspace: ${{ github.workspace }}
          pull_description: |-
-            Bot-based backport to `${target_branch}`, triggered by a label in #${pull_number}.
+            Automatic backport to `${target_branch}`, triggered by a label in #${pull_number}.
          # should be kept in sync with `uses`
          version: v0.0.5
.github/workflows/ci.yml (vendored, 8 changes)

@@ -19,7 +19,7 @@ jobs:
    - uses: actions/checkout@v3
      with:
        fetch-depth: 0
-   - uses: cachix/install-nix-action@v18
+   - uses: cachix/install-nix-action@v19
    - run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV
    - uses: cachix/cachix-action@v12
      if: needs.check_secrets.outputs.cachix == 'true'
@@ -58,7 +58,7 @@ jobs:
      with:
        fetch-depth: 0
    - run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV
-   - uses: cachix/install-nix-action@v18
+   - uses: cachix/install-nix-action@v19
    - uses: cachix/cachix-action@v12
      with:
        name: '${{ env.CACHIX_NAME }}'
@@ -77,7 +77,7 @@ jobs:
    steps:
    - uses: actions/checkout@v3
    - run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV
-   - uses: cachix/install-nix-action@v18
+   - uses: cachix/install-nix-action@v19
      with:
        install_url: '${{needs.installer.outputs.installerURL}}'
        install_options: "--tarball-url-prefix https://${{ env.CACHIX_NAME }}.cachix.org/serve"
@@ -102,7 +102,7 @@ jobs:
    - uses: actions/checkout@v3
      with:
        fetch-depth: 0
-   - uses: cachix/install-nix-action@v18
+   - uses: cachix/install-nix-action@v19
    - run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV
    - run: echo NIX_VERSION="$(nix --experimental-features 'nix-command flakes' eval .\#default.version | tr -d \")" >> $GITHUB_ENV
    - uses: cachix/cachix-action@v12
.gitignore (vendored, 6 changes)

@@ -37,14 +37,14 @@ perl/Makefile.config
 /src/libexpr/parser-tab.hh
 /src/libexpr/parser-tab.output
 /src/libexpr/nix.tbl
-/src/libexpr/tests/libexpr-tests
+/src/libexpr/tests/libnixexpr-tests

 # /src/libstore/
 *.gen.*
-/src/libstore/tests/libstore-tests
+/src/libstore/tests/libnixstore-tests

 # /src/libutil/
-/src/libutil/tests/libutil-tests
+/src/libutil/tests/libnixutil-tests

 /src/nix/nix
@@ -276,8 +276,11 @@ PKG_CHECK_MODULES([GTEST], [gtest_main])

 # Look for rapidcheck.
 # No pkg-config yet, https://github.com/emil-e/rapidcheck/issues/302
+AC_LANG_PUSH(C++)
 AC_CHECK_HEADERS([rapidcheck/gtest.h], [], [], [#include <gtest/gtest.h>])
-AC_CHECK_LIB([rapidcheck], [])
+dnl No good for C++ libs with mangled symbols
+dnl AC_CHECK_LIB([rapidcheck], [])
+AC_LANG_POP(C++)

 # Look for nlohmann/json.
@@ -633,7 +633,7 @@ written to standard output.

 A NAR archive is like a TAR or Zip archive, but it contains only the
 information that Nix considers important. For instance, timestamps are
-elided because all files in the Nix store have their timestamp set to 0
+elided because all files in the Nix store have their timestamp set to 1
 anyway. Likewise, all permissions are left out except for the execute
 bit, because all files in the Nix store have 444 or 555 permission.
@@ -45,13 +45,13 @@ To get a shell with a different compilation environment (e.g. stdenv,
 gccStdenv, clangStdenv, clang11Stdenv, ccacheStdenv):

 ```console
-$ nix-shell -A devShells.x86_64-linux.clang11StdenvPackages
+$ nix-shell -A devShells.x86_64-linux.clang11Stdenv
 ```

 or if you have a flake-enabled nix:

 ```console
-$ nix develop .#clang11StdenvPackages
+$ nix develop .#clang11Stdenv
 ```

 Note: you can use `ccacheStdenv` to drastically improve rebuild
@@ -219,7 +219,7 @@ After the CI run completes, you can check the output to extract the installer URL

 5. To generate an install command, plug this `install_url` and your GitHub username into this template:

    ```console
-   sh <(curl -L <install_url>) --tarball-url-prefix https://<github-username>-nix-install-tests.cachix.org/serve
+   curl -L <install_url> | sh -s -- --tarball-url-prefix https://<github-username>-nix-install-tests.cachix.org/serve
    ```

 <!-- #### Manually generating test installers
@@ -19,6 +19,13 @@

   [store derivation]: #gloss-store-derivation

+- [instantiate]{#gloss-instantiate}, instantiation\
+  Translate a [derivation] into a [store derivation].
+
+  See [`nix-instantiate`](./command-ref/nix-instantiate.md).
+
+  [instantiate]: #gloss-instantiate
+
 - [realise]{#gloss-realise}, realisation\
   Ensure a [store path] is [valid][validity].
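For illustration, a minimal sketch of the two glossary terms added above, assuming a `default.nix` whose top-level value is a derivation named `example`; the store hashes are placeholders:

```console
$ nix-instantiate default.nix                    # instantiate: produce a store derivation
/nix/store/…-example.drv
$ nix-store --realise /nix/store/…-example.drv   # realise: make the output a valid store path
/nix/store/…-example
```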
@@ -27,7 +27,7 @@ Set the environment variable and install Nix

 ```console
 $ export NIX_SSL_CERT_FILE=/etc/ssl/my-certificate-bundle.crt
-$ sh <(curl -L https://nixos.org/nix/install)
+$ curl -L https://nixos.org/nix/install | sh
 ```

 In the shell profile and rc files (for example, `/etc/bashrc`,
@@ -1,2 +1,38 @@
-This section describes how to install and configure Nix for first-time
-use.
+# Installation
+
+This section describes how to install and configure Nix for first-time use.
+
+The current recommended option on Linux and MacOS is [multi-user](#multi-user).
+
+## Multi-user
+
+This installation offers better sharing, improved isolation, and more security
+over a single user installation.
+
+This option requires either:
+
+* Linux running systemd, with SELinux disabled
+* MacOS
+
+```console
+$ bash <(curl -L https://nixos.org/nix/install) --daemon
+```
+
+## Single-user
+
+> Single-user is not supported on Mac.
+
+This installation has less requirements than the multi-user install, however it
+cannot offer equivalent sharing, isolation, or security.
+
+This option is suitable for systems without systemd.
+
+```console
+$ bash <(curl -L https://nixos.org/nix/install) --no-daemon
+```
+
+## Distributions
+
+The Nix community maintains installers for several distributions.
+
+They can be found in the [`nix-community/nix-installers`](https://github.com/nix-community/nix-installers) repository.
@@ -3,7 +3,7 @@
 The easiest way to install Nix is to run the following command:

 ```console
-$ sh <(curl -L https://nixos.org/nix/install)
+$ curl -L https://nixos.org/nix/install | sh
 ```

 This will run the installer interactively (causing it to explain what
@@ -27,7 +27,7 @@ you can authenticate with `sudo`.
 To explicitly select a single-user installation on your system:

 ```console
-$ sh <(curl -L https://nixos.org/nix/install) --no-daemon
+$ curl -L https://nixos.org/nix/install | sh -s -- --no-daemon
 ```

 This will perform a single-user installation of Nix, meaning that `/nix`
@@ -66,7 +66,7 @@ You can instruct the installer to perform a multi-user installation on
 your system:

 ```console
-$ sh <(curl -L https://nixos.org/nix/install) --daemon
+$ curl -L https://nixos.org/nix/install | sh -s -- --daemon
 ```

 The multi-user installation of Nix will create build users between the
@@ -287,7 +287,7 @@ These install scripts can be used the same as the main NixOS.org
 installation script:

 ```console
-$ sh <(curl -L https://nixos.org/nix/install)
+$ curl -L https://nixos.org/nix/install | sh
 ```

 In the same directory of the install script are sha256 sums, and gpg
@@ -212,7 +212,7 @@ Derivations can declare some infrequently used optional attributes.
 If this **experimental** attribute is set to true, then the derivation
 outputs will be stored in a content-addressed location rather than the
 traditional input-addressed one.
-This only has an effect if the `ca-derivation` experimental feature is enabled.
+This only has an effect if the `ca-derivations` experimental feature is enabled.

 Setting this attribute also requires setting `outputHashMode` and `outputHashAlgo` like for *fixed-output derivations* (see above).
@@ -255,3 +255,78 @@ Derivations can declare some infrequently used optional attributes.
 > substituted. Thus it is usually a good idea to align `system` with
 > `builtins.currentSystem` when setting `allowSubstitutes` to
 > `false`. For most trivial derivations this should be the case.
+
+- [`__structuredAttrs`]{#adv-attr-structuredAttrs}\
+  If the special attribute `__structuredAttrs` is set to `true`, the other derivation
+  attributes are serialised in JSON format and made available to the
+  builder via the file `.attrs.json` in the builder’s temporary
+  directory. This obviates the need for [`passAsFile`](#adv-attr-passAsFile) since JSON files
+  have no size restrictions, unlike process environments.
+
+  It also makes it possible to tweak derivation settings in a structured way; see
+  [`outputChecks`](#adv-attr-outputChecks) for example.
+
+  As a convenience to Bash builders,
+  Nix writes a script named `.attrs.sh` to the builder’s directory
+  that initialises shell variables corresponding to all attributes
+  that are representable in Bash. This includes non-nested
+  (associative) arrays. For example, the attribute `hardening.format = true`
+  ends up as the Bash associative array element `${hardening[format]}`.
+
+- [`outputChecks`]{#adv-attr-outputChecks}\
+  When using [structured attributes](#adv-attr-structuredAttrs), the `outputChecks`
+  attribute allows defining checks per-output.
+
+  In addition to
+  [`allowedReferences`](#adv-attr-allowedReferences), [`allowedRequisites`](#adv-attr-allowedRequisites),
+  [`disallowedReferences`](#adv-attr-disallowedReferences) and [`disallowedRequisites`](#adv-attr-disallowedRequisites),
+  the following attributes are available:
+
+  - `maxSize` defines the maximum size of the resulting [store object](../glossary.md#gloss-store-object).
+  - `maxClosureSize` defines the maximum size of the output's closure.
+  - `ignoreSelfRefs` controls whether self-references should be considered when
+    checking for allowed references/requisites.
+
+  Example:
+
+  ```nix
+  __structuredAttrs = true;
+
+  outputChecks.out = {
+    # The closure of 'out' must not be larger than 256 MiB.
+    maxClosureSize = 256 * 1024 * 1024;
+
+    # It must not refer to the C compiler or to the 'dev' output.
+    disallowedRequisites = [ stdenv.cc "dev" ];
+  };
+
+  outputChecks.dev = {
+    # The 'dev' output must not be larger than 128 KiB.
+    maxSize = 128 * 1024;
+  };
+  ```
+
+- [`unsafeDiscardReferences`]{#adv-attr-unsafeDiscardReferences}\
+  > **Warning**
+  > This is an experimental feature.
+  >
+  > To enable it, add the following to [nix.conf](../command-ref/conf-file.md):
+  >
+  > ```
+  > extra-experimental-features = discard-references
+  > ```
+
+  When using [structured attributes](#adv-attr-structuredAttrs), the
+  attribute `unsafeDiscardReferences` is an attribute set with a boolean value for each output name.
+  If set to `true`, it disables scanning the output for runtime dependencies.
+
+  Example:
+
+  ```nix
+  __structuredAttrs = true;
+  unsafeDiscardReferences.out = true;
+  ```
+
+  This is useful, for example, when generating self-contained filesystem images with
+  their own embedded Nix store: hashes found inside such an image refer
+  to the embedded store and not to the host's Nix store.
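To make the `.attrs.sh` mechanism described in the new `__structuredAttrs` entry above more concrete, here is a minimal, hypothetical Bash builder fragment; it assumes only what the entry states (a derivation that sets `__structuredAttrs = true;` and, for the sake of the example, `hardening.format = true;`):

```bash
# Sketch of a Bash builder fragment under structured attributes.
source .attrs.sh   # written by Nix into the build's temporary directory
# The nested attribute hardening.format surfaces as an associative array element:
echo "format hardening requested: ${hardening[format]}"
# The same attributes are also available as JSON in .attrs.json.
```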
@@ -24,7 +24,7 @@
 | [Equality] | *expr* `==` *expr* | none | 11 |
 | Inequality | *expr* `!=` *expr* | none | 11 |
 | Logical conjunction (`AND`) | *bool* `&&` *bool* | left | 12 |
-| Logical disjunction (`OR`) | *bool* `\|\|` *bool* | left | 13 |
+| Logical disjunction (`OR`) | *bool* <code>\|\|</code> *bool* | left | 13 |
 | [Logical implication] | *bool* `->` *bool* | none | 14 |

 [string]: ./values.md#type-string
@@ -116,7 +116,7 @@ The result is a string.
 [store path]: ../glossary.md#gloss-store-path
 [store]: ../glossary.md#gloss-store

-[Path and string concatenation]: #path-and-string-concatenation
+[String and path concatenation]: #string-and-path-concatenation

 ## Update
@@ -4,16 +4,16 @@ This chapter is for impatient people who don't like reading
 documentation. For more in-depth information you are kindly referred
 to subsequent chapters.

-1. Install single-user Nix by running the following:
+1. Install Nix by running the following:

    ```console
-   $ bash <(curl -L https://nixos.org/nix/install)
+   $ curl -L https://nixos.org/nix/install | sh
    ```

-   This will install Nix in `/nix`. The install script will create
-   `/nix` using `sudo`, so make sure you have sufficient rights. (For
-   other installation methods, see
-   [here](installation/installation.md).)
+   The install script will use `sudo`, so make sure you have sufficient rights.
+   On Linux, `--daemon` can be omitted for a single-user install.
+
+   For other installation methods, see [here](installation/installation.md).

 1. See what installable packages are currently available in the
    channel:
@@ -8,3 +8,15 @@
 discovered by making multiple syscalls. This change makes these operations
 lazy such that these lookups will only be performed if the attribute is used.
 This optimization affects a minority of filesystems and operating systems.
+
+* In derivations that use structured attributes, you can now use `unsafeDiscardReferences`
+  to disable scanning a given output for runtime dependencies:
+  ```nix
+  __structuredAttrs = true;
+  unsafeDiscardReferences.out = true;
+  ```
+  This is useful e.g. when generating self-contained filesystem images with
+  their own embedded Nix store: hashes found inside such an image refer
+  to the embedded store and not to the host's Nix store.
+
+  This requires the `discard-references` experimental feature.
flake.nix (55 changes)

@@ -410,6 +410,18 @@
         };
       };

+      nixos-lib = import (nixpkgs + "/nixos/lib") { };
+
+      # https://nixos.org/manual/nixos/unstable/index.html#sec-calling-nixos-tests
+      runNixOSTestFor = system: test: nixos-lib.runTest {
+        imports = [ test ];
+        hostPkgs = nixpkgsFor.${system};
+        defaults = {
+          nixpkgs.pkgs = nixpkgsFor.${system};
+        };
+        _module.args.nixpkgs = nixpkgs;
+      };
+
     in {

       # A Nixpkgs overlay that overrides the 'nix' and
@@ -488,49 +500,22 @@
       };

       # System tests.
-      tests.remoteBuilds = import ./tests/remote-builds.nix {
-        system = "x86_64-linux";
-        inherit nixpkgs;
-        overlay = self.overlays.default;
-      };
-
-      tests.nix-copy-closure = import ./tests/nix-copy-closure.nix {
-        system = "x86_64-linux";
-        inherit nixpkgs;
-        overlay = self.overlays.default;
-      };
-
-      tests.nssPreload = (import ./tests/nss-preload.nix rec {
-        system = "x86_64-linux";
-        inherit nixpkgs;
-        overlay = self.overlays.default;
-      });
-
-      tests.githubFlakes = (import ./tests/github-flakes.nix rec {
-        system = "x86_64-linux";
-        inherit nixpkgs;
-        overlay = self.overlays.default;
-      });
-
-      tests.sourcehutFlakes = (import ./tests/sourcehut-flakes.nix rec {
-        system = "x86_64-linux";
-        inherit nixpkgs;
-        overlay = self.overlays.default;
-      });
-
-      tests.containers = (import ./tests/containers.nix rec {
-        system = "x86_64-linux";
-        inherit nixpkgs;
-        overlay = self.overlays.default;
-      });
+      tests.remoteBuilds = runNixOSTestFor "x86_64-linux" ./tests/nixos/remote-builds.nix;
+
+      tests.nix-copy-closure = runNixOSTestFor "x86_64-linux" ./tests/nixos/nix-copy-closure.nix;
+
+      tests.nssPreload = runNixOSTestFor "x86_64-linux" ./tests/nixos/nss-preload.nix;
+
+      tests.githubFlakes = runNixOSTestFor "x86_64-linux" ./tests/nixos/github-flakes.nix;
+
+      tests.sourcehutFlakes = runNixOSTestFor "x86_64-linux" ./tests/nixos/sourcehut-flakes.nix;
+
+      tests.containers = runNixOSTestFor "x86_64-linux" ./tests/nixos/containers/containers.nix;

       tests.setuid = nixpkgs.lib.genAttrs
         ["i686-linux" "x86_64-linux"]
-        (system:
-          import ./tests/setuid.nix rec {
-            inherit nixpkgs system;
-            overlay = self.overlays.default;
-          });
+        (system: runNixOSTestFor system ./tests/nixos/setuid.nix);

       # Make sure that nix-env still produces the exact same result
       # on a particular version of Nixpkgs.
maintainers/backporting.md (new file, 12 lines)

@@ -0,0 +1,12 @@
+
+# Backporting
+
+To [automatically backport a pull request](https://github.com/NixOS/nix/blob/master/.github/workflows/backport.yml) to a release branch once it's merged, assign it a label of the form [`backport <branch>`](https://github.com/NixOS/nix/labels?q=backport).
+
+Since [GitHub Actions workflows will not trigger other workflows](https://docs.github.com/en/actions/using-workflows/triggering-a-workflow#triggering-a-workflow-from-a-workflow), checks on the automatic backport need to be triggered by another actor.
+This is achieved by closing and reopening the backport pull request.
+
+This specifically affects the [`installer_test`] check.
+Note that it only runs after the other tests, so it may take a while to appear.
+
+[`installer_test`]: https://github.com/NixOS/nix/blob/895dfc656a21f6252ddf48df0d1f215effa04ecb/.github/workflows/ci.yml#L70-L91
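As a sketch of the labelling step the new document describes, one way to apply such a label from the command line with the GitHub CLI; the PR number and maintenance branch here are placeholders, not values from this commit:

```console
$ gh pr edit 1234 --add-label "backport 2.13-maintenance"
```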
@@ -3,6 +3,9 @@ programs-list :=
 # Build a program with symbolic name $(1). The program is defined by
 # various variables prefixed by ‘$(1)_’:
 #
+# - $(1)_NAME: the name of the program (e.g. ‘foo’); defaults to
+#   $(1).
+#
 # - $(1)_DIR: the directory where the (non-installed) program will be
 #   placed.
 #
@@ -23,11 +26,12 @@ programs-list :=
 # - $(1)_INSTALL_DIR: the directory where the program will be
 #   installed; defaults to $(bindir).
 define build-program
+  $(1)_NAME ?= $(1)
   _d := $(buildprefix)$$($(1)_DIR)
   _srcs := $$(sort $$(foreach src, $$($(1)_SOURCES), $$(src)))
   $(1)_OBJS := $$(addprefix $(buildprefix), $$(addsuffix .o, $$(basename $$(_srcs))))
   _libs := $$(foreach lib, $$($(1)_LIBS), $$($$(lib)_PATH))
-  $(1)_PATH := $$(_d)/$(1)
+  $(1)_PATH := $$(_d)/$$($(1)_NAME)

  $$(eval $$(call create-dir, $$(_d)))
@@ -38,7 +42,7 @@ define build-program

  ifdef $(1)_INSTALL_DIR

-  $(1)_INSTALL_PATH := $$($(1)_INSTALL_DIR)/$(1)
+  $(1)_INSTALL_PATH := $$($(1)_INSTALL_DIR)/$$($(1)_NAME)

  $$(eval $$(call create-dir, $$($(1)_INSTALL_DIR)))
@@ -1,6 +1,6 @@
 makefiles = local.mk

-GLOBAL_CXXFLAGS += -g -Wall -std=c++17 -I ../src
+GLOBAL_CXXFLAGS += -g -Wall -std=c++20 -I ../src

 -include Makefile.config
@@ -26,6 +26,7 @@ static ref<Store> store()
     static std::shared_ptr<Store> _store;
     if (!_store) {
         try {
+            initLibStore();
             loadConfFile();
             settings.lockCPU = false;
             _store = openStore();
@@ -380,10 +380,9 @@
 ref<eval_cache::AttrCursor>
 Installable::getCursor(EvalState & state)
 {
-    auto cursors = getCursors(state);
-    if (cursors.empty())
-        throw Error("cannot find flake attribute '%s'", what());
-    return cursors[0];
+    /* Although getCursors should return at least one element, in case it doesn't,
+       bound check to avoid an undefined behavior for vector[0] */
+    return getCursors(state).at(0);
 }

 static StorePath getDeriver(
@@ -696,46 +695,28 @@ InstallableFlake::getCursors(EvalState & state)

     std::vector<ref<eval_cache::AttrCursor>> res;

-    for (auto & attrPath : getActualAttrPaths()) {
-        auto attr = root->findAlongAttrPath(parseAttrPath(state, attrPath));
-        if (attr) res.push_back(ref(*attr));
-    }
-
-    return res;
-}
-
-ref<eval_cache::AttrCursor> InstallableFlake::getCursor(EvalState & state)
-{
-    auto lockedFlake = getLockedFlake();
-
-    auto cache = openEvalCache(state, lockedFlake);
-    auto root = cache->getRoot();
-
     Suggestions suggestions;

     auto attrPaths = getActualAttrPaths();

     for (auto & attrPath : attrPaths) {
         debug("trying flake output attribute '%s'", attrPath);

-        auto attrOrSuggestions = root->findAlongAttrPath(
-            parseAttrPath(state, attrPath),
-            true
-        );
-
-        if (!attrOrSuggestions) {
-            suggestions += attrOrSuggestions.getSuggestions();
-            continue;
-        }
-
-        return *attrOrSuggestions;
+        auto attr = root->findAlongAttrPath(parseAttrPath(state, attrPath));
+        if (attr) {
+            res.push_back(ref(*attr));
+        } else {
+            suggestions += attr.getSuggestions();
+        }
     }

+    if (res.size() == 0)
     throw Error(
         suggestions,
         "flake '%s' does not provide attribute %s",
         flakeRef,
         showAttrPaths(attrPaths));

+    return res;
 }

 std::shared_ptr<flake::LockedFlake> InstallableFlake::getLockedFlake() const
@@ -103,9 +103,13 @@ struct Installable
         return {};
     }

+    /* Get a cursor to each value this Installable could refer to. However
+       if none exists, throw exception instead of returning empty vector. */
     virtual std::vector<ref<eval_cache::AttrCursor>>
     getCursors(EvalState & state);

+    /* Get the first and most preferred cursor this Installable could refer
+       to, or throw an exception if none exists. */
     virtual ref<eval_cache::AttrCursor>
     getCursor(EvalState & state);
@@ -193,15 +197,11 @@ struct InstallableFlake : InstallableValue

     std::pair<Value *, PosIdx> toValue(EvalState & state) override;

-    /* Get a cursor to every attrpath in getActualAttrPaths() that
-       exists. */
+    /* Get a cursor to every attrpath in getActualAttrPaths()
+       that exists. However if none exists, throw an exception. */
     std::vector<ref<eval_cache::AttrCursor>>
     getCursors(EvalState & state) override;

-    /* Get a cursor to the first attrpath in getActualAttrPaths() that
-       exists, or throw an exception with suggestions if none exists. */
-    ref<eval_cache::AttrCursor> getCursor(EvalState & state) override;
-
     std::shared_ptr<flake::LockedFlake> getLockedFlake() const;

     FlakeRef nixpkgsFlakeRef() const override;
@@ -6,4 +6,4 @@ Name: Nix
 Description: Nix Package Manager
 Version: @PACKAGE_VERSION@
 Libs: -L${libdir} -lnixcmd
-Cflags: -I${includedir}/nix -std=c++17
+Cflags: -I${includedir}/nix -std=c++20
@@ -546,6 +546,7 @@ EvalState::EvalState(
     static_assert(sizeof(Env) <= 16, "environment must be <= 16 bytes");

     /* Initialise the Nix expression search path. */
+    evalSettings.nixPath.setDefault(evalSettings.getDefaultNixPath());
     if (!evalSettings.pureEval) {
         for (auto & i : _searchPath) addToSearchPath(i);
         for (auto & i : evalSettings.nixPath.get()) addToSearchPath(i);
@@ -2467,31 +2468,36 @@ std::ostream & operator << (std::ostream & str, const ExternalValueBase & v) {

 EvalSettings::EvalSettings()
 {
-    auto var = getEnv("NIX_PATH");
-    if (var) nixPath = parseNixPath(*var);
 }

+/* impure => NIX_PATH or a default path
+ * restrict-eval => NIX_PATH
+ * pure-eval => empty
+ */
 Strings EvalSettings::getDefaultNixPath()
 {
-    Strings res;
-    auto add = [&](const Path & p, const std::string & s = std::string()) {
-        if (pathExists(p)) {
-            if (s.empty()) {
-                res.push_back(p);
-            } else {
-                res.push_back(s + "=" + p);
-            }
-        }
-    };
-
-    if (!evalSettings.restrictEval && !evalSettings.pureEval) {
+    if (pureEval)
+        return {};
+
+    auto var = getEnv("NIX_PATH");
+    if (var) {
+        return parseNixPath(*var);
+    } else if (restrictEval) {
+        return {};
+    } else {
+        Strings res;
+        auto add = [&](const Path & p, const std::optional<std::string> & s = std::nullopt) {
+            if (pathExists(p))
+                res.push_back(s ? *s + "=" + p : p);
+        };
+
         add(getHome() + "/.nix-defexpr/channels");
         add(settings.nixStateDir + "/profiles/per-user/root/channels/nixpkgs", "nixpkgs");
         add(settings.nixStateDir + "/profiles/per-user/root/channels");
-    }

         return res;
+    }
 }

 bool EvalSettings::isPseudoUrl(std::string_view s)
 {
@@ -587,7 +587,7 @@ struct EvalSettings : Config
 {
     EvalSettings();

-    static Strings getDefaultNixPath();
+    Strings getDefaultNixPath();

     static bool isPseudoUrl(std::string_view s);
@@ -597,8 +597,15 @@ struct EvalSettings : Config
         "Whether builtin functions that allow executing native code should be enabled."};

     Setting<Strings> nixPath{
-        this, getDefaultNixPath(), "nix-path",
-        "List of directories to be searched for `<...>` file references."};
+        this, {}, "nix-path",
+        R"(
+          List of directories to be searched for `<...>` file references.
+
+          If [pure evaluation](#conf-pure-eval) is disabled,
+          this is initialised using the [`NIX_PATH`](@docroot@/command-ref/env-common.md#env-NIX_PATH)
+          environment variable, or, if it is unset and [restricted evaluation](#conf-restrict-eval)
+          is disabled, a default search path including the user's and `root`'s channels.
+        )"};

     Setting<bool> restrictEval{
         this, false, "restrict-eval",
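A minimal sketch of the search-path behaviour that the reworded `nix-path` description documents, assuming pure evaluation is disabled; the path is a placeholder:

```console
$ NIX_PATH=nixpkgs=/path/to/nixpkgs nix-instantiate --eval -E '<nixpkgs>'
/path/to/nixpkgs
```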
@@ -208,7 +208,7 @@ std::optional<FlakeRef> LockFile::isUnlocked() const
     visit(root);

     for (auto & i : nodes) {
-        if (i == root) continue;
+        if (i == ref<const Node>(root)) continue;
         auto node = i.dynamic_pointer_cast<const LockedNode>();
         if (node
             && !node->lockedRef.input.isLocked()
@@ -7,4 +7,4 @@ Description: Nix Package Manager
 Version: @PACKAGE_VERSION@
 Requires: nix-store bdw-gc
 Libs: -L${libdir} -lnixexpr
-Cflags: -I${includedir}/nix -std=c++17
+Cflags: -I${includedir}/nix -std=c++20
@@ -499,6 +499,17 @@ static RegisterPrimOp primop_fetchGit({
       > **Note**
       >
       > This behavior is disabled in *Pure evaluation mode*.
+
+  - To fetch the content of a checked-out work directory:
+
+    ```nix
+    builtins.fetchGit ./work-dir
+    ```
+
+  If the URL points to a local directory, and no `ref` or `rev` is
+  given, `fetchGit` will use the current content of the checked-out
+  files, even if they are not committed or added to Git's index. It will
+  only consider files added to the Git repository, as listed by `git ls-files`.
     )",
     .fun = prim_fetchGit,
 });
@@ -1,7 +1,7 @@
 #include <gmock/gmock.h>
 #include <gtest/gtest.h>

-#include "libexprtests.hh"
+#include "tests/libexpr.hh"

 namespace nix {
@@ -1,4 +1,4 @@
-#include "libexprtests.hh"
+#include "tests/libexpr.hh"
 #include "value-to-json.hh"

 namespace nix {
@@ -7,18 +7,19 @@
 #include "eval-inline.hh"
 #include "store-api.hh"

+#include "tests/libstore.hh"
+
 namespace nix {
-    class LibExprTest : public ::testing::Test {
+    class LibExprTest : public LibStoreTest {
         public:
             static void SetUpTestSuite() {
-                initLibStore();
+                LibStoreTest::SetUpTestSuite();
                 initGC();
             }

         protected:
             LibExprTest()
-                : store(openStore("dummy://"))
+                : LibStoreTest()
                 , state({}, store)
             {
             }
@@ -36,7 +37,6 @@ namespace nix {
                 return state.symbols.create(value);
             }

-            ref<Store> store;
             EvalState state;
     };
@@ -2,6 +2,8 @@ check: libexpr-tests_RUN

 programs += libexpr-tests

+libexpr-tests_NAME := libnixexpr-tests
+
 libexpr-tests_DIR := $(d)

 libexpr-tests_INSTALL_DIR :=
@@ -12,6 +14,6 @@ libexpr-tests_SOURCES := \

 libexpr-tests_CXXFLAGS += -I src/libexpr -I src/libutil -I src/libstore -I src/libexpr/tests -I src/libfetchers

-libexpr-tests_LIBS = libexpr libutil libstore libfetchers
+libexpr-tests_LIBS = libstore-tests libutils-tests libexpr libutil libstore libfetchers

 libexpr-tests_LDFLAGS := $(GTEST_LIBS) -lgmock
@@ -1,7 +1,7 @@
 #include <gmock/gmock.h>
 #include <gtest/gtest.h>

-#include "libexprtests.hh"
+#include "tests/libexpr.hh"

 namespace nix {
 class CaptureLogger : public Logger
@@ -1,4 +1,4 @@
-#include "libexprtests.hh"
+#include "tests/libexpr.hh"

 namespace nix {
 // Testing of trivial expressions
@@ -1,6 +1,10 @@
-#include "value/context.hh"
+#include <nlohmann/json.hpp>
+#include <gtest/gtest.h>
+#include <rapidcheck/gtest.h>

-#include "libexprtests.hh"
+#include "tests/path.hh"
+#include "tests/libexpr.hh"
+#include "tests/value/context.hh"

 namespace nix {
@@ -70,3 +74,54 @@ TEST_F(NixStringContextElemTest, built) {
 }

 }
+
+namespace rc {
+using namespace nix;
+
+Gen<NixStringContextElem::Opaque> Arbitrary<NixStringContextElem::Opaque>::arbitrary()
+{
+    return gen::just(NixStringContextElem::Opaque {
+        .path = *gen::arbitrary<StorePath>(),
+    });
+}
+
+Gen<NixStringContextElem::DrvDeep> Arbitrary<NixStringContextElem::DrvDeep>::arbitrary()
+{
+    return gen::just(NixStringContextElem::DrvDeep {
+        .drvPath = *gen::arbitrary<StorePath>(),
+    });
+}
+
+Gen<NixStringContextElem::Built> Arbitrary<NixStringContextElem::Built>::arbitrary()
+{
+    return gen::just(NixStringContextElem::Built {
+        .drvPath = *gen::arbitrary<StorePath>(),
+        .output = (*gen::arbitrary<StorePathName>()).name,
+    });
+}
+
+Gen<NixStringContextElem> Arbitrary<NixStringContextElem>::arbitrary()
+{
+    switch (*gen::inRange<uint8_t>(0, 2)) {
+    case 0:
+        return gen::just<NixStringContextElem>(*gen::arbitrary<NixStringContextElem::Opaque>());
+    case 1:
+        return gen::just<NixStringContextElem>(*gen::arbitrary<NixStringContextElem::DrvDeep>());
+    default:
+        return gen::just<NixStringContextElem>(*gen::arbitrary<NixStringContextElem::Built>());
+    }
+}
+
+}
+
+namespace nix {
+
+RC_GTEST_FIXTURE_PROP(
+    NixStringContextElemTest,
+    prop_round_rip,
+    (const NixStringContextElem & o))
+{
+    RC_ASSERT(o == NixStringContextElem::parse(store(), o.to_string(store())));
+}
+
+}
src/libexpr/tests/value/context.hh (new file, 30 lines)

@@ -0,0 +1,30 @@
+#pragma once
+
+#include <rapidcheck/gen/Arbitrary.h>
+
+#include <value/context.hh>
+
+namespace rc {
+using namespace nix;
+
+template<>
+struct Arbitrary<NixStringContextElem::Opaque> {
+    static Gen<NixStringContextElem::Opaque> arbitrary();
+};
+
+template<>
+struct Arbitrary<NixStringContextElem::Built> {
+    static Gen<NixStringContextElem::Built> arbitrary();
+};
+
+template<>
+struct Arbitrary<NixStringContextElem::DrvDeep> {
+    static Gen<NixStringContextElem::DrvDeep> arbitrary();
+};
+
+template<>
+struct Arbitrary<NixStringContextElem> {
+    static Gen<NixStringContextElem> arbitrary();
+};
+
+}
@@ -1,9 +1,10 @@
 #pragma once

 #include "util.hh"
+#include "comparator.hh"
 #include "path.hh"

-#include <optional>
+#include <variant>

 #include <nlohmann/json_fwd.hpp>
@@ -32,6 +33,8 @@ class Store;
 */
 struct NixStringContextElem_Opaque {
     StorePath path;
+
+    GENERATE_CMP(NixStringContextElem_Opaque, me->path);
 };

 /* Path to a derivation and its entire build closure.
@@ -44,6 +47,8 @@ struct NixStringContextElem_Opaque {
 */
 struct NixStringContextElem_DrvDeep {
     StorePath drvPath;
+
+    GENERATE_CMP(NixStringContextElem_DrvDeep, me->drvPath);
 };

 /* Derivation output.
@@ -53,6 +58,8 @@ struct NixStringContextElem_DrvDeep {
 struct NixStringContextElem_Built {
     StorePath drvPath;
     std::string output;
+
+    GENERATE_CMP(NixStringContextElem_Built, me->drvPath, me->output);
 };

 using _NixStringContextElem_Raw = std::variant<
@@ -6,4 +6,4 @@ Name: Nix
 Description: Nix Package Manager
 Version: @PACKAGE_VERSION@
 Libs: -L${libdir} -lnixmain
-Cflags: -I${includedir}/nix -std=c++17
+Cflags: -I${includedir}/nix -std=c++20
@@ -370,7 +370,7 @@ void BinaryCacheStore::queryPathInfoUncached(const StorePath & storePath,
     auto callbackPtr = std::make_shared<decltype(callback)>(std::move(callback));

     getFile(narInfoFile,
-        {[=](std::future<std::optional<std::string>> fut) {
+        {[=,this](std::future<std::optional<std::string>> fut) {
             try {
                 auto data = fut.get();
@@ -16,6 +16,7 @@
 #include "json-utils.hh"
 #include "cgroup.hh"
 #include "personality.hh"
+#include "namespaces.hh"

 #include <regex>
 #include <queue>
@@ -167,7 +168,8 @@ void LocalDerivationGoal::killSandbox(bool getStats)
 }


-void LocalDerivationGoal::tryLocalBuild() {
+void LocalDerivationGoal::tryLocalBuild()
+{
     unsigned int curBuilds = worker.getNrLocalBuilds();
     if (curBuilds >= settings.maxBuildJobs) {
         state = &DerivationGoal::tryToBuild;
@@ -205,6 +207,17 @@ void LocalDerivationGoal::tryLocalBuild()
 #endif
     }

+#if __linux__
+    if (useChroot) {
+        if (!mountNamespacesSupported() || !pidNamespacesSupported()) {
+            if (!settings.sandboxFallback)
+                throw Error("this system does not support the kernel namespaces that are required for sandboxing; use '--no-sandbox' to disable sandboxing");
+            debug("auto-disabling sandboxing because the prerequisite namespaces are not available");
+            useChroot = false;
+        }
+    }
+#endif
+
     if (useBuildUsers()) {
         if (!buildUser)
             buildUser = acquireUserLock(parsedDrv->useUidRange() ? 65536 : 1, useChroot);
@@ -888,12 +901,7 @@ void LocalDerivationGoal::startBuilder()

         userNamespaceSync.create();

-        Path maxUserNamespaces = "/proc/sys/user/max_user_namespaces";
-        static bool userNamespacesEnabled =
-            pathExists(maxUserNamespaces)
-            && trim(readFile(maxUserNamespaces)) != "0";
-
-        usingUserNamespace = userNamespacesEnabled;
+        usingUserNamespace = userNamespacesSupported();

         Pid helper = startProcess([&]() {
@@ -920,64 +928,15 @@ void LocalDerivationGoal::startBuilder()
                 flags |= CLONE_NEWUSER;

             pid_t child = clone(childEntry, stack + stackSize, flags, this);
-            if (child == -1 && errno == EINVAL) {
-                /* Fallback for Linux < 2.13 where CLONE_NEWPID and
-                   CLONE_PARENT are not allowed together. */
-                flags &= ~CLONE_NEWPID;
-                child = clone(childEntry, stack + stackSize, flags, this);
-            }
-            if (usingUserNamespace && child == -1 && (errno == EPERM || errno == EINVAL)) {
-                /* Some distros patch Linux to not allow unprivileged
-                 * user namespaces. If we get EPERM or EINVAL, try
-                 * without CLONE_NEWUSER and see if that works.
-                 * Details: https://salsa.debian.org/kernel-team/linux/-/commit/d98e00eda6bea437e39b9e80444eee84a32438a6
-                 */
-                usingUserNamespace = false;
-                flags &= ~CLONE_NEWUSER;
-                child = clone(childEntry, stack + stackSize, flags, this);
-            }
-            if (child == -1) {
-                switch(errno) {
-                case EPERM:
-                case EINVAL: {
-                    int errno_ = errno;
-                    if (!userNamespacesEnabled && errno==EPERM)
-                        notice("user namespaces appear to be disabled; they are required for sandboxing; check /proc/sys/user/max_user_namespaces");
-                    if (userNamespacesEnabled) {
-                        Path procSysKernelUnprivilegedUsernsClone = "/proc/sys/kernel/unprivileged_userns_clone";
-                        if (pathExists(procSysKernelUnprivilegedUsernsClone)
-                            && trim(readFile(procSysKernelUnprivilegedUsernsClone)) == "0") {
-                            notice("user namespaces appear to be disabled; they are required for sandboxing; check /proc/sys/kernel/unprivileged_userns_clone");
-                        }
-                    }
-                    Path procSelfNsUser = "/proc/self/ns/user";
-                    if (!pathExists(procSelfNsUser))
-                        notice("/proc/self/ns/user does not exist; your kernel was likely built without CONFIG_USER_NS=y, which is required for sandboxing");
-                    /* Otherwise exit with EPERM so we can handle this in the
-                       parent. This is only done when sandbox-fallback is set
-                       to true (the default). */
-                    if (settings.sandboxFallback)
-                        _exit(1);
-                    /* Mention sandbox-fallback in the error message so the user
-                       knows that having it disabled contributed to the
-                       unrecoverability of this failure */
-                    throw SysError(errno_, "creating sandboxed builder process using clone(), without sandbox-fallback");
-                }
-                default:
+            if (child == -1)
                 throw SysError("creating sandboxed builder process using clone()");
-                }
-            }
             writeFull(builderOut.writeSide.get(),
                 fmt("%d %d\n", usingUserNamespace, child));
             _exit(0);
         });

-        int res = helper.wait();
-        if (res != 0 && settings.sandboxFallback) {
-            useChroot = false;
-            initTmpDir();
-            goto fallback;
-        } else if (res != 0)
+        if (helper.wait() != 0)
             throw Error("unable to start build process");

         userNamespaceSync.readSide = -1;
@@ -1045,9 +1004,6 @@ void LocalDerivationGoal::startBuilder()
     } else
 #endif
     {
-#if __linux__
-    fallback:
-#endif
         pid = startProcess([&]() {
             runChild();
         });
@@ -1516,8 +1472,7 @@ void LocalDerivationGoal::startDaemon()
         FdSink to(remote.get());
         try {
             daemon::processConnection(store, from, to,
-                daemon::NotTrusted, daemon::Recursive,
-                [&](Store & store) { store.createUser("nobody", 65535); });
+                daemon::NotTrusted, daemon::Recursive);
             debug("terminated daemon connection");
         } catch (SysError &) {
             ignoreException();
@@ -2323,11 +2278,28 @@ DrvOutputs LocalDerivationGoal::registerOutputs()
             buildUser ? std::optional(buildUser->getUIDRange()) : std::nullopt,
             inodesSeen);

+        bool discardReferences = false;
+        if (auto structuredAttrs = parsedDrv->getStructuredAttrs()) {
+            if (auto udr = get(*structuredAttrs, "unsafeDiscardReferences")) {
+                settings.requireExperimentalFeature(Xp::DiscardReferences);
+                if (auto output = get(*udr, outputName)) {
+                    if (!output->is_boolean())
+                        throw Error("attribute 'unsafeDiscardReferences.\"%s\"' of derivation '%s' must be a Boolean", outputName, drvPath.to_string());
+                    discardReferences = output->get<bool>();
+                }
+            }
+        }
+
+        StorePathSet references;
+        if (discardReferences)
+            debug("discarding references of output '%s'", outputName);
+        else {
         debug("scanning for references for output '%s' in temp location '%s'", outputName, actualPath);

         /* Pass blank Sink as we are not ready to hash data at this stage. */
         NullSink blank;
-        auto references = scanForReferences(blank, actualPath, referenceablePaths);
+        references = scanForReferences(blank, actualPath, referenceablePaths);
+        }

         outputReferencesIfUnregistered.insert_or_assign(
             outputName,
|
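For orientation, the new `unsafeDiscardReferences` check expects the derivation's structured attributes to carry a per-output Boolean map. A rough, self-contained sketch of the lookup it performs, using nlohmann::json directly with a made-up attribute set (not taken from a real derivation):

    #include <nlohmann/json.hpp>
    #include <stdexcept>
    #include <iostream>

    int main()
    {
        // Hypothetical __structuredAttrs payload, for illustration only.
        nlohmann::json structuredAttrs = {
            { "unsafeDiscardReferences", { { "out", true } } }
        };

        std::string outputName = "out";
        bool discardReferences = false;

        if (auto udr = structuredAttrs.find("unsafeDiscardReferences"); udr != structuredAttrs.end()) {
            if (auto output = udr->find(outputName); output != udr->end()) {
                if (!output->is_boolean())
                    throw std::runtime_error("unsafeDiscardReferences.\"" + outputName + "\" must be a Boolean");
                discardReferences = output->get<bool>();
            }
        }

        std::cout << (discardReferences ? "skip reference scan\n" : "scan for references\n");
    }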
@@ -276,7 +276,7 @@ void Worker::run(const Goals & _topGoals)
         if (!children.empty() || !waitingForAWhile.empty())
             waitForInput();
         else {
-            if (awake.empty() && 0 == settings.maxBuildJobs)
+            if (awake.empty() && 0U == settings.maxBuildJobs)
             {
                 if (getMachines().empty())
                     throw Error("unable to start any build; either increase '--max-jobs' "
@@ -236,6 +236,10 @@ struct ClientSettings
             // the daemon, as that could cause some pretty weird stuff
             if (parseFeatures(tokenizeString<StringSet>(value)) != settings.experimentalFeatures.get())
                 debug("Ignoring the client-specified experimental features");
+        } else if (name == settings.pluginFiles.name) {
+            if (tokenizeString<Paths>(value) != settings.pluginFiles.get())
+                warn("Ignoring the client-specified plugin-files.\n"
+                     "The client specifying plugins to the daemon never made sense, and was removed in Nix >=2.14.");
         }
         else if (trusted
             || name == settings.buildTimeout.name
@@ -525,7 +529,14 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
         mode = (BuildMode) readInt(from);

         /* Repairing is not atomic, so disallowed for "untrusted"
-           clients. */
+           clients.
+
+           FIXME: layer violation in this message: the daemon code (i.e.
+           this file) knows whether a client/connection is trusted, but it
+           does not know how the client was authenticated. The mechanism
+           need not be getting the UID of the other end of a Unix Domain
+           Socket.
+        */
         if (mode == bmRepair && !trusted)
             throw Error("repairing is not allowed because you are not in 'trusted-users'");
     }
@@ -542,7 +553,9 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
         mode = (BuildMode) readInt(from);

         /* Repairing is not atomic, so disallowed for "untrusted"
-           clients. */
+           clients.
+
+           FIXME: layer violation; see above. */
         if (mode == bmRepair && !trusted)
             throw Error("repairing is not allowed because you are not in 'trusted-users'");
@@ -981,8 +994,7 @@ void processConnection(
     FdSource & from,
     FdSink & to,
     TrustedFlag trusted,
-    RecursiveFlag recursive,
-    std::function<void(Store &)> authHook)
+    RecursiveFlag recursive)
 {
     auto monitor = !recursive ? std::make_unique<MonitorFdHup>(from.fd) : nullptr;

@@ -1025,10 +1037,6 @@ void processConnection(

     try {

-        /* If we can't accept clientVersion, then throw an error
-           *here* (not above). */
-        authHook(*store);
-
         tunnelLogger->stopWork();
         to.flush();
@@ -13,11 +13,6 @@ void processConnection(
     FdSource & from,
     FdSink & to,
     TrustedFlag trusted,
-    RecursiveFlag recursive,
-    /* Arbitrary hook to check authorization / initialize user data / whatever
-       after the protocol has been negotiated. The idea is that this function
-       and everything it calls doesn't know about this stuff, and the
-       `nix-daemon` handles that instead. */
-    std::function<void(Store &)> authHook);
+    RecursiveFlag recursive);

 }
@@ -4,8 +4,9 @@
 #include "path.hh"
 #include "realisation.hh"
 #include "outputs-spec.hh"
+#include "comparator.hh"

-#include <optional>
+#include <variant>

 #include <nlohmann/json_fwd.hpp>

@@ -27,8 +28,7 @@ struct DerivedPathOpaque {
     std::string to_string(const Store & store) const;
     static DerivedPathOpaque parse(const Store & store, std::string_view);

-    bool operator < (const DerivedPathOpaque & b) const
-    { return path < b.path; }
+    GENERATE_CMP(DerivedPathOpaque, me->path);
 };

 /**

@@ -51,8 +51,7 @@ struct DerivedPathBuilt {
     static DerivedPathBuilt parse(const Store & store, std::string_view, std::string_view);
     nlohmann::json toJSON(ref<Store> store) const;

-    bool operator < (const DerivedPathBuilt & b) const
-    { return std::make_pair(drvPath, outputs) < std::make_pair(b.drvPath, b.outputs); }
+    GENERATE_CMP(DerivedPathBuilt, me->drvPath, me->outputs);
 };

 using _DerivedPathRaw = std::variant<

@@ -96,6 +95,8 @@ struct BuiltPathBuilt {

     nlohmann::json toJSON(ref<Store> store) const;
     static BuiltPathBuilt parse(const Store & store, std::string_view);

+    GENERATE_CMP(BuiltPathBuilt, me->drvPath, me->outputs);
 };

 using _BuiltPathRaw = std::variant<
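The `GENERATE_CMP` macro used above comes from Nix's comparator.hh; its exact definition is not part of this diff, but the idea is to derive the comparison operators from a listed set of members instead of hand-writing each operator. A minimal sketch of that pattern on a toy type (illustrative, not the real macro expansion):

    #include <string>
    #include <tuple>

    struct OpaquePathSketch
    {
        std::string path;

        // What a comparison-generating macro boils down to:
        // compare a tuple of the listed members.
        bool operator == (const OpaquePathSketch & other) const
        { return std::tie(path) == std::tie(other.path); }

        bool operator < (const OpaquePathSketch & other) const
        { return std::tie(path) < std::tie(other.path); }
    };

Listing the members once keeps the ordering and equality definitions consistent with each other, which is the point of replacing the hand-written `operator <` here.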
@@ -222,19 +222,19 @@ template<> void BaseSetting<SandboxMode>::convertToArg(Args & args, const std::string & category)
         .longName = name,
         .description = "Enable sandboxing.",
         .category = category,
-        .handler = {[=]() { override(smEnabled); }}
+        .handler = {[this]() { override(smEnabled); }}
     });
     args.addFlag({
         .longName = "no-" + name,
         .description = "Disable sandboxing.",
         .category = category,
-        .handler = {[=]() { override(smDisabled); }}
+        .handler = {[this]() { override(smDisabled); }}
     });
     args.addFlag({
         .longName = "relaxed-" + name,
         .description = "Enable sandboxing, but allow builds to disable it.",
         .category = category,
-        .handler = {[=]() { override(smRelaxed); }}
+        .handler = {[this]() { override(smRelaxed); }}
     });
 }
@@ -279,8 +279,8 @@ public:
           If the build users group is empty, builds will be performed under
           the uid of the Nix process (that is, the uid of the caller if
          `NIX_REMOTE` is empty, the uid under which the Nix daemon runs if
-          `NIX_REMOTE` is `daemon`). Obviously, this should not be used in
-          multi-user settings with untrusted users.
+          `NIX_REMOTE` is `daemon`). Obviously, this should not be used
+          with a nix daemon accessible to untrusted clients.

           Defaults to `nixbld` when running as root, *empty* otherwise.
         )",
@@ -696,24 +696,6 @@ public:
         )",
         {"trusted-binary-caches"}};

-    Setting<Strings> trustedUsers{
-        this, {"root"}, "trusted-users",
-        R"(
-          A list of names of users (separated by whitespace) that have
-          additional rights when connecting to the Nix daemon, such as the
-          ability to specify additional binary caches, or to import unsigned
-          NARs. You can also specify groups by prefixing them with `@`; for
-          instance, `@wheel` means all users in the `wheel` group. The default
-          is `root`.
-
-          > **Warning**
-          >
-          > Adding a user to `trusted-users` is essentially equivalent to
-          > giving that user root access to the system. For example, the user
-          > can set `sandbox-paths` and thereby obtain read access to
-          > directories that are otherwise inacessible to them.
-        )"};
-
     Setting<unsigned int> ttlNegativeNarInfoCache{
         this, 3600, "narinfo-cache-negative-ttl",
         R"(
@@ -736,18 +718,6 @@ public:
           mismatch if the build isn't reproducible.
         )"};

-    /* ?Who we trust to use the daemon in safe ways */
-    Setting<Strings> allowedUsers{
-        this, {"*"}, "allowed-users",
-        R"(
-          A list of names of users (separated by whitespace) that are allowed
-          to connect to the Nix daemon. As with the `trusted-users` option,
-          you can specify groups by prefixing them with `@`. Also, you can
-          allow all users by specifying `*`. The default is `*`.
-
-          Note that trusted users are always allowed to connect.
-        )"};
-
     Setting<bool> printMissing{this, true, "print-missing",
         "Whether to print what paths need to be built or downloaded."};
@@ -134,7 +134,6 @@ struct LegacySSHStore : public virtual LegacySSHStoreConfig, public virtual Store
         /* Hash will be set below. FIXME construct ValidPathInfo at end. */
         auto info = std::make_shared<ValidPathInfo>(path, Hash::dummy);

-        PathSet references;
         auto deriver = readString(conn->from);
         if (deriver != "")
             info->deriver = parseStorePath(deriver);
@@ -201,8 +201,6 @@ LocalStore::LocalStore(const Params & params)
             throw SysError("could not set permissions on '%s' to 755", perUserDir);
     }

-    createUser(getUserName(), getuid());
-
     /* Optionally, create directories and set permissions for a
        multi-user install. */
    if (getuid() == 0 && settings.buildUsersGroup != "") {
@@ -1824,20 +1822,6 @@ void LocalStore::signPathInfo(ValidPathInfo & info)
 }


-void LocalStore::createUser(const std::string & userName, uid_t userId)
-{
-    for (auto & dir : {
-        fmt("%s/profiles/per-user/%s", stateDir, userName),
-        fmt("%s/gcroots/per-user/%s", stateDir, userName)
-    }) {
-        createDirs(dir);
-        if (chmod(dir.c_str(), 0755) == -1)
-            throw SysError("changing permissions of directory '%s'", dir);
-        if (chown(dir.c_str(), userId, getgid()) == -1)
-            throw SysError("changing owner of directory '%s'", dir);
-    }
-}
-
 std::optional<std::pair<int64_t, Realisation>> LocalStore::queryRealisationCore_(
     LocalStore::State & state,
     const DrvOutput & id)
@@ -281,8 +281,6 @@ private:
     void signPathInfo(ValidPathInfo & info);
     void signRealisation(Realisation &);

-    void createUser(const std::string & userName, uid_t userId) override;
-
     // XXX: Make a generic `Store` method
     FixedOutputHash hashCAPath(
         const FileIngestionMethod & method,
@@ -6,4 +6,4 @@ Name: Nix
 Description: Nix Package Manager
 Version: @PACKAGE_VERSION@
 Libs: -L${libdir} -lnixstore -lnixutil
-Cflags: -I${includedir}/nix -std=c++17
+Cflags: -I${includedir}/nix -std=c++20
@@ -81,12 +81,6 @@ struct ValidPathInfo
     /* Return true iff the path is verifiably content-addressed. */
     bool isContentAddressed(const Store & store) const;

-    /* Functions to view references + hasSelfReference as one set, mainly for
-       compatibility's sake. */
-    StorePathSet referencesPossiblyToSelf() const;
-    void insertReferencePossiblyToSelf(StorePath && ref);
-    void setReferencesPossiblyToSelf(StorePathSet && refs);
-
     static const size_t maxSigs = std::numeric_limits<size_t>::max();

     /* Return the number of signatures on this .narinfo that were
@@ -1,6 +1,7 @@
 #pragma once

-#include "content-address.hh"
+#include <string_view>

 #include "types.hh"

 namespace nix {

@@ -66,8 +67,6 @@ public:
 typedef std::set<StorePath> StorePathSet;
 typedef std::vector<StorePath> StorePaths;

-typedef std::map<StorePath, std::optional<ContentAddress>> StorePathCAMap;
-
 /* Extension of derivations in the Nix store. */
 const std::string drvExtension = ".drv";
@@ -280,16 +280,24 @@ std::string optimisticLockProfile(const Path & profile)
 }


+Path profilesDir()
+{
+    auto profileRoot = getDataDir() + "/nix/profiles";
+    createDirs(profileRoot);
+    return profileRoot;
+}
+
+
 Path getDefaultProfile()
 {
     Path profileLink = getHome() + "/.nix-profile";
     try {
-        if (!pathExists(profileLink)) {
-            replaceSymlink(
+        auto profile =
             getuid() == 0
             ? settings.nixStateDir + "/profiles/default"
-            : fmt("%s/profiles/per-user/%s/profile", settings.nixStateDir, getUserName()),
-                profileLink);
+            : profilesDir() + "/profile";
+        if (!pathExists(profileLink)) {
+            replaceSymlink(profile, profileLink);
         }
         return absPath(readLink(profileLink), dirOf(profileLink));
     } catch (Error &) {
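To make the effect of the new profilesDir() concrete: for a non-root user the default profile link now points under the Nix data directory rather than under /nix/var/nix/profiles/per-user. A rough sketch of the path it resolves to, assuming getDataDir() follows the usual XDG convention ($XDG_DATA_HOME, falling back to ~/.local/share); that fallback is an assumption here, not something this hunk spells out:

    #include <cstdlib>
    #include <string>
    #include <iostream>

    // Rough approximation of the data-directory lookup profilesDir() relies on.
    std::string dataDir()
    {
        if (auto xdg = std::getenv("XDG_DATA_HOME")) return std::string(xdg);
        auto home = std::getenv("HOME");
        return std::string(home ? home : "") + "/.local/share";
    }

    int main()
    {
        // e.g. /home/alice/.local/share/nix/profiles/profile for a non-root user
        std::cout << dataDir() + "/nix/profiles/profile" << "\n";
    }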
@@ -68,6 +68,10 @@ void lockProfile(PathLocks & lock, const Path & profile);
    rebuilt. */
 std::string optimisticLockProfile(const Path & profile);

+/* Creates and returns the path to a directory suitable for storing the user’s
+   profiles. */
+Path profilesDir();
+
 /* Resolve ~/.nix-profile. If ~/.nix-profile doesn't exist yet, create
    it. */
 Path getDefaultProfile();
@@ -1,5 +1,8 @@
 #pragma once

+#include <variant>
+
+#include "hash.hh"
 #include "path.hh"
 #include <nlohmann/json_fwd.hpp>
 #include "comparator.hh"
@@ -266,6 +266,7 @@ void RemoteStore::setOptions(Connection & conn)
     overrides.erase(settings.useSubstitutes.name);
     overrides.erase(loggerSettings.showTrace.name);
     overrides.erase(settings.experimentalFeatures.name);
+    overrides.erase(settings.pluginFiles.name);
     conn.to << overrides.size();
     for (auto & i : overrides)
         conn.to << i.first << i.second.value;
@@ -746,13 +746,13 @@ StorePathSet Store::queryValidPaths(const StorePathSet & paths, SubstituteFlag m
     std::condition_variable wakeup;
     ThreadPool pool;

-    auto doQuery = [&](const Path & path) {
+    auto doQuery = [&](const StorePath & path) {
         checkInterrupt();
-        queryPathInfo(parseStorePath(path), {[path, this, &state_, &wakeup](std::future<ref<const ValidPathInfo>> fut) {
+        queryPathInfo(path, {[path, &state_, &wakeup](std::future<ref<const ValidPathInfo>> fut) {
             auto state(state_.lock());
             try {
                 auto info = fut.get();
-                state->valid.insert(parseStorePath(path));
+                state->valid.insert(path);
             } catch (InvalidPath &) {
             } catch (...) {
                 state->exc = std::current_exception();

@@ -764,7 +764,7 @@ StorePathSet Store::queryValidPaths(const StorePathSet & paths, SubstituteFlag m
     };

     for (auto & path : paths)
-        pool.enqueue(std::bind(doQuery, printStorePath(path))); // FIXME
+        pool.enqueue(std::bind(doQuery, path));

     pool.process();
@@ -87,6 +87,8 @@ enum BuildMode { bmNormal, bmRepair, bmCheck };
 struct BuildResult;


+typedef std::map<StorePath, std::optional<ContentAddress>> StorePathCAMap;
+
 struct StoreConfig : public Config
 {
     using Config::Config;

@@ -659,9 +661,6 @@ public:
         return toRealPath(printStorePath(storePath));
     }

-    virtual void createUser(const std::string & userName, uid_t userId)
-    { }
-
     /*
      * Synchronises the options of the client with those of the daemon
      * (a no-op when there’s no daemon)
62  src/libstore/tests/derived-path.cc  (new file)
@@ -0,0 +1,62 @@
+#include <regex>
+
+#include <nlohmann/json.hpp>
+#include <gtest/gtest.h>
+#include <rapidcheck/gtest.h>
+
+#include "tests/derived-path.hh"
+#include "tests/libstore.hh"
+
+namespace rc {
+using namespace nix;
+
+Gen<DerivedPath::Opaque> Arbitrary<DerivedPath::Opaque>::arbitrary()
+{
+    return gen::just(DerivedPath::Opaque {
+        .path = *gen::arbitrary<StorePath>(),
+    });
+}
+
+Gen<DerivedPath::Built> Arbitrary<DerivedPath::Built>::arbitrary()
+{
+    return gen::just(DerivedPath::Built {
+        .drvPath = *gen::arbitrary<StorePath>(),
+        .outputs = *gen::arbitrary<OutputsSpec>(),
+    });
+}
+
+Gen<DerivedPath> Arbitrary<DerivedPath>::arbitrary()
+{
+    switch (*gen::inRange<uint8_t>(0, 1)) {
+    case 0:
+        return gen::just<DerivedPath>(*gen::arbitrary<DerivedPath::Opaque>());
+    default:
+        return gen::just<DerivedPath>(*gen::arbitrary<DerivedPath::Built>());
+    }
+}
+
+}
+
+namespace nix {
+
+class DerivedPathTest : public LibStoreTest
+{
+};
+
+// FIXME: `RC_GTEST_FIXTURE_PROP` isn't calling `SetUpTestSuite` because it is
+// not a real fixture.
+//
+// See https://github.com/emil-e/rapidcheck/blob/master/doc/gtest.md#rc_gtest_fixture_propfixture-name-args
+TEST_F(DerivedPathTest, force_init)
+{
+}
+
+RC_GTEST_FIXTURE_PROP(
+    DerivedPathTest,
+    prop_round_rip,
+    (const DerivedPath & o))
+{
+    RC_ASSERT(o == DerivedPath::parse(*store, o.to_string(*store)));
+}
+
+}
28  src/libstore/tests/derived-path.hh  (new file)
@@ -0,0 +1,28 @@
+#pragma once
+
+#include <rapidcheck/gen/Arbitrary.h>
+
+#include <derived-path.hh>
+
+#include "tests/path.hh"
+#include "tests/outputs-spec.hh"
+
+namespace rc {
+using namespace nix;
+
+template<>
+struct Arbitrary<DerivedPath::Opaque> {
+    static Gen<DerivedPath::Opaque> arbitrary();
+};
+
+template<>
+struct Arbitrary<DerivedPath::Built> {
+    static Gen<DerivedPath::Built> arbitrary();
+};
+
+template<>
+struct Arbitrary<DerivedPath> {
+    static Gen<DerivedPath> arbitrary();
+};
+
+}
@@ -1,6 +1,20 @@
-check: libstore-tests_RUN
+check: libstore-tests-exe_RUN

-programs += libstore-tests
+programs += libstore-tests-exe
+
+libstore-tests-exe_NAME = libnixstore-tests
+
+libstore-tests-exe_DIR := $(d)
+
+libstore-tests-exe_INSTALL_DIR :=
+
+libstore-tests-exe_LIBS = libstore-tests
+
+libstore-tests-exe_LDFLAGS := $(GTEST_LIBS)
+
+libraries += libstore-tests
+
+libstore-tests_NAME = libnixstore-tests

 libstore-tests_DIR := $(d)

@@ -10,6 +24,6 @@ libstore-tests_SOURCES := $(wildcard $(d)/*.cc)

 libstore-tests_CXXFLAGS += -I src/libstore -I src/libutil

-libstore-tests_LIBS = libstore libutil
+libstore-tests_LIBS = libutil-tests libstore libutil

 libstore-tests_LDFLAGS := -lrapidcheck $(GTEST_LIBS)
@@ -2,6 +2,7 @@

 #include <nlohmann/json.hpp>
 #include <gtest/gtest.h>
+#include <rapidcheck/gtest.h>

 namespace nix {

@@ -199,3 +200,34 @@ TEST_JSON(ExtendedOutputsSpec, names, R"(["a","b"])", (ExtendedOutputsSpec::Expl
 #undef TEST_JSON

 }
+
+namespace rc {
+using namespace nix;
+
+Gen<OutputsSpec> Arbitrary<OutputsSpec>::arbitrary()
+{
+    switch (*gen::inRange<uint8_t>(0, 1)) {
+    case 0:
+        return gen::just((OutputsSpec) OutputsSpec::All { });
+    default:
+        return gen::just((OutputsSpec) OutputsSpec::Names {
+            *gen::nonEmpty(gen::container<StringSet>(gen::map(
+                gen::arbitrary<StorePathName>(),
+                [](StorePathName n) { return n.name; }))),
+        });
+    }
+}
+
+}
+
+namespace nix {
+
+RC_GTEST_PROP(
+    OutputsSpec,
+    prop_round_rip,
+    (const OutputsSpec & o))
+{
+    RC_ASSERT(o == OutputsSpec::parse(o.to_string()));
+}
+
+}
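The properties added in these test files all follow the same rapidcheck shape: define an Arbitrary<T> generator for the type, then assert that parsing the printed form gives back the original value for random inputs. A self-contained sketch of that round-trip pattern on a trivial type, using rapidcheck's standalone entry point rather than the gtest bridge (the toy property is illustrative only):

    #include <rapidcheck.h>
    #include <string>

    int main()
    {
        // Property: converting an int to a string and back yields the original value.
        rc::check("decimal round-trip", [](int x) {
            RC_ASSERT(std::stoi(std::to_string(x)) == x);
        });
    }

The same idea, applied to DerivedPath, OutputsSpec and StorePath above, gives much broader coverage of the parse/to_string pairs than a handful of hand-picked examples.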
17  src/libstore/tests/outputs-spec.hh  (new file)
@@ -0,0 +1,17 @@
+#pragma once
+
+#include <rapidcheck/gen/Arbitrary.h>
+
+#include <outputs-spec.hh>
+
+#include <tests/path.hh>
+
+namespace rc {
+using namespace nix;
+
+template<>
+struct Arbitrary<OutputsSpec> {
+    static Gen<OutputsSpec> arbitrary();
+};
+
+}
@@ -7,7 +7,9 @@
 #include "path-regex.hh"
 #include "store-api.hh"

-#include "libstoretests.hh"
+#include "tests/hash.hh"
+#include "tests/libstore.hh"
+#include "tests/path.hh"

 namespace nix {

@@ -73,17 +75,14 @@ void showValue(const StorePath & p, std::ostream & os) {
 namespace rc {
 using namespace nix;

-template<>
-struct Arbitrary<StorePath> {
-    static Gen<StorePath> arbitrary();
-};
-
-Gen<StorePath> Arbitrary<StorePath>::arbitrary()
+Gen<StorePathName> Arbitrary<StorePathName>::arbitrary()
 {
-    auto len = *gen::inRange<size_t>(1, StorePath::MaxPathLen);
+    auto len = *gen::inRange<size_t>(
+        1,
+        StorePath::MaxPathLen - std::string_view { HASH_PART }.size());

-    std::string pre { HASH_PART "-" };
-    pre.reserve(pre.size() + len);
+    std::string pre;
+    pre.reserve(len);

     for (size_t c = 0; c < len; ++c) {
         switch (auto i = *gen::inRange<uint8_t>(0, 10 + 2 * 26 + 6)) {

@@ -118,7 +117,17 @@ Gen<StorePath> Arbitrary<StorePath>::arbitrary()
         }
     }

-    return gen::just(StorePath { pre });
+    return gen::just(StorePathName {
+        .name = std::move(pre),
+    });
+}
+
+Gen<StorePath> Arbitrary<StorePath>::arbitrary()
+{
+    return gen::just(StorePath {
+        *gen::arbitrary<Hash>(),
+        (*gen::arbitrary<StorePathName>()).name,
+    });
 }

 } // namespace rc
28  src/libstore/tests/path.hh  (new file)
@@ -0,0 +1,28 @@
+#pragma once
+
+#include <rapidcheck/gen/Arbitrary.h>
+
+#include <path.hh>
+
+namespace nix {
+
+struct StorePathName {
+    std::string name;
+};
+
+}
+
+namespace rc {
+using namespace nix;
+
+template<>
+struct Arbitrary<StorePathName> {
+    static Gen<StorePathName> arbitrary();
+};
+
+template<>
+struct Arbitrary<StorePath> {
+    static Gen<StorePath> arbitrary();
+};
+
+}
@@ -324,7 +324,7 @@ MultiCommand::MultiCommand(const Commands & commands_)
     expectArgs({
         .label = "subcommand",
         .optional = true,
-        .handler = {[=](std::string s) {
+        .handler = {[=,this](std::string s) {
             assert(!command);
             auto i = commands.find(s);
             if (i == commands.end()) {
@@ -209,7 +209,7 @@ void BaseSetting<T>::convertToArg(Args & args, const std::string & category)
         .description = fmt("Set the `%s` setting.", name),
         .category = category,
         .labels = {"value"},
-        .handler = {[=](std::string s) { overridden = true; set(s); }},
+        .handler = {[this](std::string s) { overridden = true; set(s); }},
     });

     if (isAppendable())

@@ -218,7 +218,7 @@ void BaseSetting<T>::convertToArg(Args & args, const std::string & category)
             .description = fmt("Append to the `%s` setting.", name),
             .category = category,
             .labels = {"value"},
-            .handler = {[=](std::string s) { overridden = true; set(s, true); }},
+            .handler = {[this](std::string s) { overridden = true; set(s, true); }},
         });
 }

@@ -270,13 +270,13 @@ template<> void BaseSetting<bool>::convertToArg(Args & args, const std::string & category)
         .longName = name,
         .description = fmt("Enable the `%s` setting.", name),
         .category = category,
-        .handler = {[=]() { override(true); }}
+        .handler = {[this]() { override(true); }}
     });
     args.addFlag({
         .longName = "no-" + name,
         .description = fmt("Disable the `%s` setting.", name),
         .category = category,
-        .handler = {[=]() { override(false); }}
+        .handler = {[this]() { override(false); }}
     });
 }
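The `[=]` to `[this]` capture changes in this file (and the `[=,this]` in the MultiCommand handler above) line up with the move to `-std=c++20` elsewhere in this diff: C++20 deprecates the implicit capture of `this` that `[=]` used to perform, so the capture is spelled out. A minimal illustration with assumed names:

    #include <functional>

    struct Counter
    {
        int value = 0;

        std::function<void()> makeIncrement()
        {
            // return [=]() { ++value; };   // deprecated in C++20: captures `this` implicitly
            return [this]() { ++value; };   // explicit and equivalent
        }
    };

Since these handler lambdas only ever touch members through the enclosing object, capturing `this` alone is both sufficient and clearer about the lambda's lifetime requirements.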
@@ -250,11 +250,15 @@ public:
     operator const T &() const { return value; }
     operator T &() { return value; }
     const T & get() const { return value; }
-    bool operator ==(const T & v2) const { return value == v2; }
-    bool operator !=(const T & v2) const { return value != v2; }
-    void operator =(const T & v) { assign(v); }
+    template<typename U>
+    bool operator ==(const U & v2) const { return value == v2; }
+    template<typename U>
+    bool operator !=(const U & v2) const { return value != v2; }
+    template<typename U>
+    void operator =(const U & v) { assign(v); }
     virtual void assign(const T & v) { value = v; }
-    void setDefault(const T & v) { if (!overridden) value = v; }
+    template<typename U>
+    void setDefault(const U & v) { if (!overridden) value = v; }

     void set(const std::string & str, bool append = false) override;
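Templating these operators on the right-hand operand lets a Setting<T> be compared with, or assigned from, any type that T itself supports, without first materialising a temporary T (compare the `0U == settings.maxBuildJobs` spelling in the Worker::run hunk earlier). A rough sketch of the mechanism with a simplified stand-in class (not the real BaseSetting):

    #include <cassert>
    #include <string>

    // Simplified stand-in for the Setting wrapper.
    template<typename T>
    struct SettingSketch
    {
        T value;

        template<typename U>
        bool operator == (const U & v) const { return value == v; }

        template<typename U>
        void operator = (const U & v) { value = v; }
    };

    int main()
    {
        SettingSketch<std::string> path{"/nix/store"};
        assert(path == "/nix/store");      // compares against the literal directly
        path = "/tmp/store";               // assigns from anything assignable to the value type
        assert(path == std::string("/tmp/store"));
    }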
@@ -16,6 +16,7 @@ std::map<ExperimentalFeature, std::string> stringifiedXpFeatures = {
     { Xp::ReplFlake, "repl-flake" },
     { Xp::AutoAllocateUids, "auto-allocate-uids" },
     { Xp::Cgroups, "cgroups" },
+    { Xp::DiscardReferences, "discard-references" },
 };

 const std::optional<ExperimentalFeature> parseExperimentalFeature(const std::string_view & name)
@@ -25,6 +25,7 @@ enum struct ExperimentalFeature
     ReplFlake,
     AutoAllocateUids,
     Cgroups,
+    DiscardReferences,
 };

 /**
100  src/libutil/namespaces.cc  (new file)
@@ -0,0 +1,100 @@
+#if __linux__
+
+#include "namespaces.hh"
+#include "util.hh"
+#include "finally.hh"
+
+#include <mntent.h>
+
+namespace nix {
+
+bool userNamespacesSupported()
+{
+    static auto res = [&]() -> bool
+    {
+        if (!pathExists("/proc/self/ns/user")) {
+            debug("'/proc/self/ns/user' does not exist; your kernel was likely built without CONFIG_USER_NS=y");
+            return false;
+        }
+
+        Path maxUserNamespaces = "/proc/sys/user/max_user_namespaces";
+        if (!pathExists(maxUserNamespaces) ||
+            trim(readFile(maxUserNamespaces)) == "0")
+        {
+            debug("user namespaces appear to be disabled; check '/proc/sys/user/max_user_namespaces'");
+            return false;
+        }
+
+        Path procSysKernelUnprivilegedUsernsClone = "/proc/sys/kernel/unprivileged_userns_clone";
+        if (pathExists(procSysKernelUnprivilegedUsernsClone)
+            && trim(readFile(procSysKernelUnprivilegedUsernsClone)) == "0")
+        {
+            debug("user namespaces appear to be disabled; check '/proc/sys/kernel/unprivileged_userns_clone'");
+            return false;
+        }
+
+        Pid pid = startProcess([&]()
+        {
+            auto res = unshare(CLONE_NEWUSER);
+            _exit(res ? 1 : 0);
+        });
+
+        bool supported = pid.wait() == 0;
+
+        if (!supported)
+            debug("user namespaces do not work on this system");
+
+        return supported;
+    }();
+    return res;
+}
+
+bool mountNamespacesSupported()
+{
+    static auto res = [&]() -> bool
+    {
+        bool useUserNamespace = userNamespacesSupported();
+
+        Pid pid = startProcess([&]()
+        {
+            auto res = unshare(CLONE_NEWNS | (useUserNamespace ? CLONE_NEWUSER : 0));
+            _exit(res ? 1 : 0);
+        });
+
+        bool supported = pid.wait() == 0;
+
+        if (!supported)
+            debug("mount namespaces do not work on this system");
+
+        return supported;
+    }();
+    return res;
+}
+
+bool pidNamespacesSupported()
+{
+    static auto res = [&]() -> bool
+    {
+        /* Check whether /proc is fully visible, i.e. there are no
+           filesystems mounted on top of files inside /proc. If this
+           is not the case, then we cannot mount a new /proc inside
+           the sandbox that matches the sandbox's PID namespace.
+           See https://lore.kernel.org/lkml/87tvsrjai0.fsf@xmission.com/T/. */
+        auto fp = fopen("/proc/mounts", "r");
+        if (!fp) return false;
+        Finally delFP = [&]() { fclose(fp); };
+
+        while (auto ent = getmntent(fp))
+            if (hasPrefix(std::string_view(ent->mnt_dir), "/proc/")) {
+                debug("PID namespaces do not work because /proc is not fully visible; disabling sandboxing");
+                return false;
+            }
+
+        return true;
+    }();
+    return res;
+}
+
+}
+
+#endif
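These probes are cheap to call repeatedly, since each one computes its answer once and caches it in a function-local static. A sketch of how a caller might combine them to decide up front whether sandboxing is even possible; only the three functions come from the new file, the surrounding helper and its policy are assumptions:

    #if __linux__
    #include "namespaces.hh"

    // Hypothetical helper, not part of this diff.
    bool sandboxingLooksUsable()
    {
        using namespace nix;
        // User namespaces let an unprivileged daemon set up its UID mapping;
        // mount and PID namespaces are what the chroot/sandbox setup itself needs.
        return userNamespacesSupported()
            && mountNamespacesSupported()
            && pidNamespacesSupported();
    }
    #endif

Checking support ahead of time is what allows the clone()-failure fallback path in local-derivation-goal.cc to be dropped earlier in this diff.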
15  src/libutil/namespaces.hh  (new file)
@@ -0,0 +1,15 @@
+#pragma once
+
+namespace nix {
+
+#if __linux__
+
+bool userNamespacesSupported();
+
+bool mountNamespacesSupported();
+
+bool pidNamespacesSupported();
+
+#endif
+
+}
@@ -1,5 +1,12 @@
-#include "hash.hh"
+#include <regex>
+
+#include <nlohmann/json.hpp>
 #include <gtest/gtest.h>
+#include <rapidcheck/gtest.h>
+
+#include <hash.hh>
+
+#include "tests/hash.hh"

 namespace nix {

@@ -73,3 +80,16 @@ namespace nix {
         "c7d329eeb6dd26545e96e55b874be909");
     }
 }
+
+namespace rc {
+using namespace nix;
+
+Gen<Hash> Arbitrary<Hash>::arbitrary()
+{
+    Hash hash(htSHA1);
+    for (size_t i = 0; i < hash.hashSize; ++i)
+        hash.hash[i] = *gen::arbitrary<uint8_t>();
+    return gen::just(hash);
+}
+
+}
15  src/libutil/tests/hash.hh  (new file)
@@ -0,0 +1,15 @@
+#pragma once
+
+#include <rapidcheck/gen/Arbitrary.h>
+
+#include <hash.hh>
+
+namespace rc {
+using namespace nix;
+
+template<>
+struct Arbitrary<Hash> {
+    static Gen<Hash> arbitrary();
+};
+
+}
@@ -2,14 +2,28 @@ check: libutil-tests_RUN

 programs += libutil-tests

+libutil-tests-exe_NAME = libnixutil-tests
+
+libutil-tests-exe_DIR := $(d)
+
+libutil-tests-exe_INSTALL_DIR :=
+
+libutil-tests-exe_LIBS = libutil-tests
+
+libutil-tests-exe_LDFLAGS := $(GTEST_LIBS)
+
+libraries += libutil-tests
+
+libutil-tests_NAME = libnixutil-tests
+
 libutil-tests_DIR := $(d)

 libutil-tests_INSTALL_DIR :=

 libutil-tests_SOURCES := $(wildcard $(d)/*.cc)

-libutil-tests_CXXFLAGS += -I src/libutil -I src/libexpr
+libutil-tests_CXXFLAGS += -I src/libutil

 libutil-tests_LIBS = libutil

-libutil-tests_LDFLAGS := $(GTEST_LIBS)
+libutil-tests_LDFLAGS := -lrapidcheck $(GTEST_LIBS)
@@ -99,6 +99,27 @@ namespace nix {
         ASSERT_EQ(parsed, expected);
     }

+    TEST(parseURL, parsesFilePlusHttpsUrl) {
+        auto s = "file+https://www.example.org/video.mp4";
+        auto parsed = parseURL(s);
+
+        ParsedURL expected {
+            .url = "file+https://www.example.org/video.mp4",
+            .base = "https://www.example.org/video.mp4",
+            .scheme = "file+https",
+            .authority = "www.example.org",
+            .path = "/video.mp4",
+            .query = (StringMap) { },
+            .fragment = "",
+        };
+
+        ASSERT_EQ(parsed, expected);
+    }
+
+    TEST(parseURL, rejectsAuthorityInUrlsWithFileTransportation) {
+        auto s = "file://www.example.org/video.mp4";
+        ASSERT_THROW(parseURL(s), Error);
+    }
+
     TEST(parseURL, parseIPv4Address) {
         auto s = "http://127.0.0.1:8080/file.tar.gz?download=fast&when=now#hello";
@@ -30,13 +30,13 @@ ParsedURL parseURL(const std::string & url)
     auto & query = match[6];
     auto & fragment = match[7];

-    auto isFile = scheme.find("file") != std::string::npos;
+    auto transportIsFile = parseUrlScheme(scheme).transport == "file";

-    if (authority && *authority != "" && isFile)
+    if (authority && *authority != "" && transportIsFile)
         throw BadURL("file:// URL '%s' has unexpected authority '%s'",
             url, *authority);

-    if (isFile && path.empty())
+    if (transportIsFile && path.empty())
         path = "/";

     return ParsedURL{
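The old substring test treated any scheme containing "file" as a file URL, so compound schemes such as `file+https` (exercised by the new parsesFilePlusHttpsUrl test above) had their authority rejected. A tiny illustration of the difference; the scheme split here is a sketch of what a transport-aware parser does, not the real parseUrlScheme helper:

    #include <string>
    #include <iostream>

    // Sketch: for a compound scheme like "file+https", take the part after '+' as the transport.
    std::string transportOf(const std::string & scheme)
    {
        auto plus = scheme.find('+');
        return plus == std::string::npos ? scheme : scheme.substr(plus + 1);
    }

    int main()
    {
        std::string scheme = "file+https";
        bool oldIsFile = scheme.find("file") != std::string::npos; // true: authority was rejected
        bool newIsFile = transportOf(scheme) == "file";            // false: handled as an https fetch
        std::cout << oldIsFile << " " << newIsFile << "\n";
    }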
@@ -537,6 +537,16 @@ std::string getUserName()
     return name;
 }

+Path getHomeOf(uid_t userId)
+{
+    std::vector<char> buf(16384);
+    struct passwd pwbuf;
+    struct passwd * pw;
+    if (getpwuid_r(userId, &pwbuf, buf.data(), buf.size(), &pw) != 0
+        || !pw || !pw->pw_dir || !pw->pw_dir[0])
+        throw Error("cannot determine user's home directory");
+    return pw->pw_dir;
+}
+
 Path getHome()
 {

@@ -558,13 +568,7 @@ Path getHome()
         }
     }
     if (!homeDir) {
-        std::vector<char> buf(16384);
-        struct passwd pwbuf;
-        struct passwd * pw;
-        if (getpwuid_r(geteuid(), &pwbuf, buf.data(), buf.size(), &pw) != 0
-            || !pw || !pw->pw_dir || !pw->pw_dir[0])
-            throw Error("cannot determine user's home directory");
-        homeDir = pw->pw_dir;
+        homeDir = getHomeOf(geteuid());
         if (unownedUserHomeDir.has_value() && unownedUserHomeDir != homeDir) {
             warn("$HOME ('%s') is not owned by you, falling back to the one defined in the 'passwd' file ('%s')", *unownedUserHomeDir, *homeDir);
         }
@@ -137,6 +137,9 @@ void deletePath(const Path & path, uint64_t & bytesFreed);

 std::string getUserName();

+/* Return the given user's home directory from /etc/passwd. */
+Path getHomeOf(uid_t userId);
+
 /* Return $HOME or the user's home directory from /etc/passwd. */
 Path getHome();
@@ -541,7 +541,9 @@ static void main_nix_build(int argc, char * * argv)
             "SHELL=%5%; "
             "BASH=%5%; "
             "set +e; "
-            R"s([ -n "$PS1" -a -z "$NIX_SHELL_PRESERVE_PROMPT" ] && PS1='\n\[\033[1;32m\][nix-shell:\w]\$\[\033[0m\] '; )s"
+            R"s([ -n "$PS1" -a -z "$NIX_SHELL_PRESERVE_PROMPT" ] && )s" +
+            (getuid() == 0 ? R"s(PS1='\n\[\033[1;31m\][nix-shell:\w]\$\[\033[0m\] '; )s"
+                           : R"s(PS1='\n\[\033[1;32m\][nix-shell:\w]\$\[\033[0m\] '; )s") +
             "if [ \"$(type -t runHook)\" = function ]; then runHook shellHook; fi; "
             "unset NIX_ENFORCE_PURITY; "
             "shopt -u nullglob; "
@@ -1,8 +1,10 @@
+#include "profiles.hh"
 #include "shared.hh"
 #include "globals.hh"
 #include "filetransfer.hh"
 #include "store-api.hh"
 #include "legacy.hh"
+#include "util.hh"
 #include "tarball.hh"

 #include <fcntl.h>

@@ -166,7 +168,7 @@ static int main_nix_channel(int argc, char ** argv)
     nixDefExpr = home + "/.nix-defexpr";

     // Figure out the name of the channels profile.
-    profile = fmt("%s/profiles/per-user/%s/channels", settings.nixStateDir, getUserName());
+    profile = profilesDir() + "/channels";

     enum {
         cNone,
@@ -34,6 +34,43 @@
 using namespace nix;
 using namespace nix::daemon;

+struct UserSettings : Config {
+
+    Setting<Strings> trustedUsers{
+        this, {"root"}, "trusted-users",
+        R"(
+          A list of names of users (separated by whitespace) that have
+          additional rights when connecting to the Nix daemon, such as the
+          ability to specify additional binary caches, or to import unsigned
+          NARs. You can also specify groups by prefixing them with `@`; for
+          instance, `@wheel` means all users in the `wheel` group. The default
+          is `root`.
+
+          > **Warning**
+          >
+          > Adding a user to `trusted-users` is essentially equivalent to
+          > giving that user root access to the system. For example, the user
+          > can set `sandbox-paths` and thereby obtain read access to
+          > directories that are otherwise inaccessible to them.
+        )"};
+
+    /* ?Who we trust to use the daemon in safe ways */
+    Setting<Strings> allowedUsers{
+        this, {"*"}, "allowed-users",
+        R"(
+          A list of names of users (separated by whitespace) that are allowed
+          to connect to the Nix daemon. As with the `trusted-users` option,
+          you can specify groups by prefixing them with `@`. Also, you can
+          allow all users by specifying `*`. The default is `*`.
+
+          Note that trusted users are always allowed to connect.
+        )"};
+};
+
+UserSettings userSettings;
+
+static GlobalConfig::Register rSettings(&userSettings);
+
 #ifndef __linux__
 #define SPLICE_F_MOVE 0
 static ssize_t splice(int fd_in, void *off_in, int fd_out, void *off_out, size_t len, unsigned int flags)
@@ -203,8 +240,8 @@ static void daemonLoop()
                 struct group * gr = peer.gidKnown ? getgrgid(peer.gid) : 0;
                 std::string group = gr ? gr->gr_name : std::to_string(peer.gid);

-                Strings trustedUsers = settings.trustedUsers;
-                Strings allowedUsers = settings.allowedUsers;
+                Strings trustedUsers = userSettings.trustedUsers;
+                Strings allowedUsers = userSettings.allowedUsers;

                 if (matchUser(user, group, trustedUsers))
                     trusted = Trusted;
@@ -241,15 +278,7 @@ static void daemonLoop()
                 // Handle the connection.
                 FdSource from(remote.get());
                 FdSink to(remote.get());
-                processConnection(openUncachedStore(), from, to, trusted, NotRecursive, [&](Store & store) {
-#if 0
-                    /* Prevent users from doing something very dangerous. */
-                    if (geteuid() == 0 &&
-                        querySetting("build-users-group", "") == "")
-                        throw Error("if you run 'nix-daemon' as root, then you MUST set 'build-users-group'!");
-#endif
-                    store.createUser(user, peer.uid);
-                });
+                processConnection(openUncachedStore(), from, to, trusted, NotRecursive);

                 exit(0);
             }, options);
@@ -302,7 +331,7 @@ static void runDaemon(bool stdio)
             /* Auth hook is empty because in this mode we blindly trust the
                standard streams. Limiting access to those is explicitly
                not `nix-daemon`'s responsibility. */
-            processConnection(openUncachedStore(), from, to, Trusted, NotRecursive, [&](Store & _){});
+            processConnection(openUncachedStore(), from, to, Trusted, NotRecursive);
         }
     } else
         daemonLoop();
@@ -954,6 +954,7 @@ struct CmdFlakeArchive : FlakeCommand, MixJSON
 struct CmdFlakeShow : FlakeCommand, MixJSON
 {
     bool showLegacy = false;
+    bool showAllSystems = false;

     CmdFlakeShow()
     {

@@ -962,6 +963,11 @@ struct CmdFlakeShow : FlakeCommand, MixJSON
             .description = "Show the contents of the `legacyPackages` output.",
             .handler = {&showLegacy, true}
         });
+        addFlag({
+            .longName = "all-systems",
+            .description = "Show the contents of outputs for all systems.",
+            .handler = {&showAllSystems, true}
+        });
     }

     std::string description() override
@@ -982,6 +988,62 @@ struct CmdFlakeShow : FlakeCommand, MixJSON

         auto state = getEvalState();
         auto flake = std::make_shared<LockedFlake>(lockFlake());
+        auto localSystem = std::string(settings.thisSystem.get());
+
+        std::function<bool(
+            eval_cache::AttrCursor & visitor,
+            const std::vector<Symbol> &attrPath,
+            const Symbol &attr)> hasContent;
+
+        // For frameworks it's important that structures are as lazy as possible
+        // to prevent infinite recursions, performance issues and errors that
+        // aren't related to the thing to evaluate. As a consequence, they have
+        // to emit more attributes than strictly (sic) necessary.
+        // However, these attributes with empty values are not useful to the user
+        // so we omit them.
+        hasContent = [&](
+            eval_cache::AttrCursor & visitor,
+            const std::vector<Symbol> &attrPath,
+            const Symbol &attr) -> bool
+        {
+            auto attrPath2(attrPath);
+            attrPath2.push_back(attr);
+            auto attrPathS = state->symbols.resolve(attrPath2);
+            const auto & attrName = state->symbols[attr];
+
+            auto visitor2 = visitor.getAttr(attrName);
+
+            if ((attrPathS[0] == "apps"
+                    || attrPathS[0] == "checks"
+                    || attrPathS[0] == "devShells"
+                    || attrPathS[0] == "legacyPackages"
+                    || attrPathS[0] == "packages")
+                && (attrPathS.size() == 1 || attrPathS.size() == 2)) {
+                for (const auto &subAttr : visitor2->getAttrs()) {
+                    if (hasContent(*visitor2, attrPath2, subAttr)) {
+                        return true;
+                    }
+                }
+                return false;
+            }
+
+            if ((attrPathS.size() == 1)
+                && (attrPathS[0] == "formatter"
+                    || attrPathS[0] == "nixosConfigurations"
+                    || attrPathS[0] == "nixosModules"
+                    || attrPathS[0] == "overlays"
+                    )) {
+                for (const auto &subAttr : visitor2->getAttrs()) {
+                    if (hasContent(*visitor2, attrPath2, subAttr)) {
+                        return true;
+                    }
+                }
+                return false;
+            }
+
+            // If we don't recognize it, it's probably content
+            return true;
+        };
+
         std::function<nlohmann::json(
             eval_cache::AttrCursor & visitor,
|
||||||
{
|
{
|
||||||
if (!json)
|
if (!json)
|
||||||
logger->cout("%s", headerPrefix);
|
logger->cout("%s", headerPrefix);
|
||||||
auto attrs = visitor.getAttrs();
|
std::vector<Symbol> attrs;
|
||||||
|
for (const auto &attr : visitor.getAttrs()) {
|
||||||
|
if (hasContent(visitor, attrPath, attr))
|
||||||
|
attrs.push_back(attr);
|
||||||
|
}
|
||||||
|
|
||||||
for (const auto & [i, attr] : enumerate(attrs)) {
|
for (const auto & [i, attr] : enumerate(attrs)) {
|
||||||
const auto & attrName = state->symbols[attr];
|
const auto & attrName = state->symbols[attr];
|
||||||
bool last = i + 1 == attrs.size();
|
bool last = i + 1 == attrs.size();
|
||||||
|
@ -1072,11 +1139,19 @@ struct CmdFlakeShow : FlakeCommand, MixJSON
|
||||||
|| (attrPath.size() == 3 && (attrPathS[0] == "checks" || attrPathS[0] == "packages" || attrPathS[0] == "devShells"))
|
|| (attrPath.size() == 3 && (attrPathS[0] == "checks" || attrPathS[0] == "packages" || attrPathS[0] == "devShells"))
|
||||||
)
|
)
|
||||||
{
|
{
|
||||||
|
if (!showAllSystems && std::string(attrPathS[1]) != localSystem) {
|
||||||
|
if (!json)
|
||||||
|
logger->cout(fmt("%s " ANSI_WARNING "omitted" ANSI_NORMAL " (use '--all-systems' to show)", headerPrefix));
|
||||||
|
else {
|
||||||
|
logger->warn(fmt("%s omitted (use '--all-systems' to show)", concatStringsSep(".", attrPathS)));
|
||||||
|
}
|
||||||
|
} else {
|
||||||
if (visitor.isDerivation())
|
if (visitor.isDerivation())
|
||||||
showDerivation();
|
showDerivation();
|
||||||
else
|
else
|
||||||
throw Error("expected a derivation");
|
throw Error("expected a derivation");
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
else if (attrPath.size() > 0 && attrPathS[0] == "hydraJobs") {
|
else if (attrPath.size() > 0 && attrPathS[0] == "hydraJobs") {
|
||||||
if (visitor.isDerivation())
|
if (visitor.isDerivation())
|
||||||
|
@ -1094,6 +1169,12 @@ struct CmdFlakeShow : FlakeCommand, MixJSON
|
||||||
else {
|
else {
|
||||||
logger->warn(fmt("%s omitted (use '--legacy' to show)", concatStringsSep(".", attrPathS)));
|
logger->warn(fmt("%s omitted (use '--legacy' to show)", concatStringsSep(".", attrPathS)));
|
||||||
}
|
}
|
||||||
|
} else if (!showAllSystems && std::string(attrPathS[1]) != localSystem) {
|
||||||
|
if (!json)
|
||||||
|
logger->cout(fmt("%s " ANSI_WARNING "omitted" ANSI_NORMAL " (use '--all-systems' to show)", headerPrefix));
|
||||||
|
else {
|
||||||
|
logger->warn(fmt("%s omitted (use '--all-systems' to show)", concatStringsSep(".", attrPathS)));
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
if (visitor.isDerivation())
|
if (visitor.isDerivation())
|
||||||
showDerivation();
|
showDerivation();
|
||||||
|
|
|
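A rough usage sketch of the behaviour the hunks above implement (illustrative, not part of the diff; `--all-systems` is the flag wired to `showAllSystems`, `--legacy` already existed):

    # run in a checkout containing a flake.nix
    nix flake show                  # current system only; other systems print "omitted (use '--all-systems' to show)"
    nix flake show --all-systems    # descend into apps/checks/devShells/packages for every system
    nix flake show --legacy         # also descend into legacyPackages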
@@ -1,10 +1,13 @@
 #include "command.hh"
 #include "shared.hh"
 #include "store-api.hh"
+#include "finally.hh"
+
+#include <nlohmann/json.hpp>

 using namespace nix;

-struct CmdPingStore : StoreCommand
+struct CmdPingStore : StoreCommand, MixJSON
 {
     std::string description() override
     {

@@ -20,10 +23,21 @@ struct CmdPingStore : StoreCommand
     void run(ref<Store> store) override
     {
+        if (!json) {
             notice("Store URL: %s", store->getUri());
             store->connect();
             if (auto version = store->getVersion())
                 notice("Version: %s", *version);
+        } else {
+            nlohmann::json res;
+            Finally printRes([&]() {
+                logger->cout("%s", res);
+            });
+            res["url"] = store->getUri();
+            store->connect();
+            if (auto version = store->getVersion())
+                res["version"] = *version;
+        }
     }
 };
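A sketch of how the JSON branch added above is expected to behave (key names come from the hunk; the concrete values and the jq call are only illustrative):

    nix store ping            # human-readable: "Store URL: ..." / "Version: ..." notices
    nix store ping --json     # one JSON object, e.g. {"url":"daemon","version":"2.13.0"}
    nix store ping --json | jq -r .url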
@@ -56,8 +56,8 @@ struct CmdSearch : InstallableCommand, MixJSON
     Strings getDefaultFlakeAttrPaths() override
     {
         return {
-            "packages." + settings.thisSystem.get() + ".",
-            "legacyPackages." + settings.thisSystem.get() + "."
+            "packages." + settings.thisSystem.get(),
+            "legacyPackages." + settings.thisSystem.get()
         };
     }
@@ -81,14 +81,14 @@ struct CmdVerify : StorePathsCommand
         ThreadPool pool;

-        auto doPath = [&](const Path & storePath) {
+        auto doPath = [&](const StorePath & storePath) {
             try {
                 checkInterrupt();

                 MaintainCount<std::atomic<size_t>> mcActive(active);
                 update();

-                auto info = store->queryPathInfo(store->parseStorePath(storePath));
+                auto info = store->queryPathInfo(storePath);

                 // Note: info->path can be different from storePath
                 // for binary cache stores when using --all (since we

@@ -173,7 +173,7 @@ struct CmdVerify : StorePathsCommand
         };

         for (auto & storePath : storePaths)
-            pool.enqueue(std::bind(doPath, store->printStorePath(storePath)));
+            pool.enqueue(std::bind(doPath, storePath));

         pool.process();
@@ -67,4 +67,11 @@ rec {
     disallowedReferences = [test5];
   };

+  test11 = makeTest 11 {
+    __structuredAttrs = true;
+    unsafeDiscardReferences.out = true;
+    outputChecks.out.allowedReferences = [];
+    buildCommand = ''echo ${dep} > "''${outputs[out]}"'';
+  };
+
 }
@@ -40,3 +40,12 @@ nix-build -o $RESULT check-refs.nix -A test7
 # test10 should succeed (no disallowed references).
 nix-build -o $RESULT check-refs.nix -A test10
+
+if isDaemonNewer 2.12pre20230103; then
+    enableFeatures discard-references
+    restartDaemon
+
+    # test11 should succeed.
+    test11=$(nix-build -o $RESULT check-refs.nix -A test11)
+    [[ -z $(nix-store -q --references "$test11") ]]
+fi
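A condensed sketch of what the new test11 exercises, assuming a daemon with the `discard-references` experimental feature enabled (helper functions as used in the hunk above; the variable name is illustrative):

    enableFeatures discard-references
    restartDaemon
    out=$(nix-build -o $RESULT check-refs.nix -A test11)
    # the output embeds ${dep} textually, but unsafeDiscardReferences.out makes
    # the reference scanner record nothing for it:
    [[ -z $(nix-store -q --references "$out") ]]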
@@ -62,7 +62,7 @@ readLink() {
 }

 clearProfiles() {
-    profiles="$NIX_STATE_DIR"/profiles
+    profiles="$HOME"/.local/share/nix/profiles
     rm -rf $profiles
 }
@@ -20,6 +20,10 @@ writeSimpleFlake() {
         foo = import ./simple.nix;
         default = foo;
       };
+      packages.someOtherSystem = rec {
+        foo = import ./simple.nix;
+        default = foo;
+      };

       # To test "nix flake init".
       legacyPackages.x86_64-linux.hello = import ./simple.nix;
@@ -41,8 +41,8 @@ cat > $templatesDir/trivial/flake.nix <<EOF
   description = "A flake for building Hello World";

   outputs = { self, nixpkgs }: {
-    packages.x86_64-linux = rec {
-      hello = nixpkgs.legacyPackages.x86_64-linux.hello;
+    packages.$system = rec {
+      hello = nixpkgs.legacyPackages.$system.hello;
       default = hello;
     };
   };
tests/flakes/show.sh (new file, 66 lines)

@@ -0,0 +1,66 @@
+source ./common.sh
+
+flakeDir=$TEST_ROOT/flake
+mkdir -p "$flakeDir"
+
+writeSimpleFlake "$flakeDir"
+cd "$flakeDir"
+
+
+# By default: Only show the packages content for the current system and no
+# legacyPackages at all
+nix flake show --json > show-output.json
+nix eval --impure --expr '
+let show_output = builtins.fromJSON (builtins.readFile ./show-output.json);
+in
+assert show_output.packages.someOtherSystem.default == {};
+assert show_output.packages.${builtins.currentSystem}.default.name == "simple";
+assert show_output.legacyPackages.${builtins.currentSystem} == {};
+true
+'
+
+# With `--all-systems`, show the packages for all systems
+nix flake show --json --all-systems > show-output.json
+nix eval --impure --expr '
+let show_output = builtins.fromJSON (builtins.readFile ./show-output.json);
+in
+assert show_output.packages.someOtherSystem.default.name == "simple";
+assert show_output.legacyPackages.${builtins.currentSystem} == {};
+true
+'
+
+# With `--legacy`, show the legacy packages
+nix flake show --json --legacy > show-output.json
+nix eval --impure --expr '
+let show_output = builtins.fromJSON (builtins.readFile ./show-output.json);
+in
+assert show_output.legacyPackages.${builtins.currentSystem}.hello.name == "simple";
+true
+'
+
+# Test that attributes are only reported when they have actual content
+cat >flake.nix <<EOF
+{
+  description = "Bla bla";
+
+  outputs = inputs: rec {
+    apps.$system = { };
+    checks.$system = { };
+    devShells.$system = { };
+    legacyPackages.$system = { };
+    packages.$system = { };
+    packages.someOtherSystem = { };
+
+    formatter = { };
+    nixosConfigurations = { };
+    nixosModules = { };
+  };
+}
+EOF
+nix flake show --json --all-systems > show-output.json
+nix eval --impure --expr '
+let show_output = builtins.fromJSON (builtins.readFile ./show-output.json);
+in
+assert show_output == { };
+true
+'
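The last assertion can also be reproduced by hand: with every output attribute left empty, the content filter in flake.cc collapses the whole report to an empty object (the jq call is only illustrative):

    # run inside the flake written above
    nix flake show --json --all-systems | jq .   # expect: {}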
@@ -18,6 +18,7 @@ nix_tests = \
   fetchMercurial.sh \
   gc-auto.sh \
   user-envs.sh \
+  user-envs-migration.sh \
   binary-cache.sh \
   multiple-outputs.sh \
   ca/build.sh \

@@ -114,6 +115,7 @@ nix_tests = \
   store-ping.sh \
   fetchClosure.sh \
   completions.sh \
+  flakes/show.sh \
   impure-derivations.sh \
   path-from-hash-part.sh \
   toString-path.sh
@@ -12,3 +12,8 @@ nix-instantiate --eval -E '<by-relative-path/simple.nix>' --restrict-eval
 [[ $(nix-instantiate --find-file by-absolute-path/simple.nix) = $PWD/simple.nix ]]
 [[ $(nix-instantiate --find-file by-relative-path/simple.nix) = $PWD/simple.nix ]]
+
+unset NIX_PATH
+
+[[ $(nix-instantiate --option nix-path by-relative-path=. --find-file by-relative-path/simple.nix) = "$PWD/simple.nix" ]]
+[[ $(NIX_PATH= nix-instantiate --option nix-path by-relative-path=. --find-file by-relative-path/simple.nix) = "$PWD/simple.nix" ]]
@@ -1,12 +1,7 @@
 # Test whether we can run a NixOS container inside a Nix build using systemd-nspawn.
-{ nixpkgs, system, overlay }:
+{ lib, nixpkgs, ... }:

-with import (nixpkgs + "/nixos/lib/testing-python.nix") {
-  inherit system;
-  extraConfigurations = [ { nixpkgs.overlays = [ overlay ]; } ];
-};
-
-makeTest ({
+{
   name = "containers";

   nodes =

@@ -65,4 +60,4 @@ makeTest ({
     host.succeed("[[ $(cat ./result/msg) = 'Hello World' ]]")
   '';

-})
+}
@@ -1,14 +1,9 @@
-{ nixpkgs, system, overlay }:
+{ lib, config, nixpkgs, ... }:

-with import (nixpkgs + "/nixos/lib/testing-python.nix") {
-  inherit system;
-  extraConfigurations = [ { nixpkgs.overlays = [ overlay ]; } ];
-};
-
 let
+  pkgs = config.nodes.client.nixpkgs.pkgs;

   # Generate a fake root CA and a fake api.github.com / github.com / channels.nixos.org certificate.
-  cert = pkgs.runCommand "cert" { buildInputs = [ pkgs.openssl ]; }
+  cert = pkgs.runCommand "cert" { nativeBuildInputs = [ pkgs.openssl ]; }
     ''
       mkdir -p $out

@@ -92,8 +87,6 @@ let
     '';
 in

-makeTest (
-
 {
   name = "github-flakes";

@@ -207,4 +200,4 @@ makeTest (
       client.succeed("nix build nixpkgs#fuse --tarball-ttl 0")
     '';

-})
+}
@@ -1,13 +1,16 @@
 # Test ‘nix-copy-closure’.

-{ nixpkgs, system, overlay }:
+{ lib, config, nixpkgs, hostPkgs, ... }:

-with import (nixpkgs + "/nixos/lib/testing-python.nix") {
-  inherit system;
-  extraConfigurations = [ { nixpkgs.overlays = [ overlay ]; } ];
-};
-
-makeTest (let pkgA = pkgs.cowsay; pkgB = pkgs.wget; pkgC = pkgs.hello; pkgD = pkgs.tmux; in {
+let
+  pkgs = config.nodes.client.nixpkgs.pkgs;
+
+  pkgA = pkgs.cowsay;
+  pkgB = pkgs.wget;
+  pkgC = pkgs.hello;
+  pkgD = pkgs.tmux;
+
+in {
   name = "nix-copy-closure";

   nodes =

@@ -74,4 +77,4 @@ makeTest (let pkgA = pkgs.cowsay; pkgB = pkgs.wget; pkgC = pkgs.hello; pkgD = pk
       # )
       # client.succeed("nix-store --check-validity ${pkgC}")
     '';
-})
+}
Some files were not shown because too many files have changed in this diff.