
Merge branch 'master' into patch-3

h0nIg 2025-06-26 23:31:24 +02:00 committed by GitHub
commit 2481e9a203
96 changed files with 1899 additions and 512 deletions


@ -40,6 +40,7 @@ jobs:
extra_nix_config: |
sandbox = true
max-jobs = 1
- uses: DeterminateSystems/magic-nix-cache-action@main
# Since Ubuntu 23.10, unprivileged user namespaces are no longer allowed to map to the root user:
# https://ubuntu.com/blog/ubuntu-23-10-restricted-unprivileged-user-namespaces
- run: sudo sysctl -w kernel.apparmor_restrict_unprivileged_userns=0
@ -133,6 +134,7 @@ jobs:
- uses: cachix/install-nix-action@v31
with:
install_url: https://releases.nixos.org/nix/nix-2.20.3/install
- uses: DeterminateSystems/magic-nix-cache-action@main
- run: echo NIX_VERSION="$(nix --experimental-features 'nix-command flakes' eval .\#nix.version | tr -d \")" >> $GITHUB_ENV
- run: nix --experimental-features 'nix-command flakes' build .#dockerImage -L
- run: docker load -i ./result/image.tar.gz
@ -174,6 +176,7 @@ jobs:
steps:
- uses: actions/checkout@v4
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- run: |
nix build -L \
.#hydraJobs.tests.functional_user \
@ -199,4 +202,5 @@ jobs:
repository: NixOS/flake-regressions-data
path: flake-regressions/tests
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- run: nix build -L --out-link ./new-nix && PATH=$(pwd)/new-nix/bin:$PATH MAX_FLAKES=25 flake-regressions/eval-all.sh


@ -0,0 +1,9 @@
---
synopsis: "`build-dir` no longer defaults to `$TMPDIR`"
---
The directory in which temporary build directories are created no longer defaults
to `TMPDIR` or `/tmp`, to avoid builders making their directories
world-accessible. This behavior allowed escaping the build sandbox and can
cause build impurities even when not used maliciously. We now default to `builds`
in `NIX_STATE_DIR` (which is `/nix/var/nix/builds` in the default configuration).
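
If the new default is not suitable for a particular deployment, the location can still be set explicitly via the `build-dir` setting. A minimal sketch, assuming the NixOS `nix.settings` module (which forwards free-form options to `nix.conf`); the path is illustrative:

```nix
{
  # Equivalent to `build-dir = /var/lib/nix-builds` in nix.conf.
  # Pick a directory that is not world-writable.
  nix.settings.build-dir = "/var/lib/nix-builds";
}
```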


@ -25,7 +25,7 @@ This performs the default type of installation for your platform:
We recommend the multi-user installation if it supports your platform and you can authenticate with `sudo`.
The installer can configured with various command line arguments and environment variables.
The installer can be configured with various command line arguments and environment variables.
To show available command line flags:
```console


@ -53,23 +53,13 @@ Derivations can declare some infrequently used optional attributes.
- [`__structuredAttrs`]{#adv-attr-structuredAttrs}\
If the special attribute `__structuredAttrs` is set to `true`, the other derivation
attributes are serialised into a file in JSON format. The environment variable
`NIX_ATTRS_JSON_FILE` points to the exact location of that file both in a build
and a [`nix-shell`](../command-ref/nix-shell.md). This obviates the need for
[`passAsFile`](#adv-attr-passAsFile) since JSON files have no size restrictions,
unlike process environments.
attributes are serialised into a file in JSON format.
It also makes it possible to tweak derivation settings in a structured way; see
[`outputChecks`](#adv-attr-outputChecks) for example.
This obviates the need for [`passAsFile`](#adv-attr-passAsFile) since JSON files have no size restrictions, unlike process environments.
It also makes it possible to tweak derivation settings in a structured way;
see [`outputChecks`](#adv-attr-outputChecks) for example.
As a convenience to Bash builders,
Nix writes a script that initialises shell variables
corresponding to all attributes that are representable in Bash. The
environment variable `NIX_ATTRS_SH_FILE` points to the exact
location of the script, both in a build and a
[`nix-shell`](../command-ref/nix-shell.md). This includes non-nested
(associative) arrays. For example, the attribute `hardening.format = true`
ends up as the Bash associative array element `${hardening[format]}`.
See the [corresponding section in the derivation page](@docroot@/store/derivation/index.md#structured-attrs) for further details.
> **Warning**
>
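
As an illustration of the above, a minimal sketch of a derivation that combines `__structuredAttrs` with a nested `outputChecks` attribute; the builder path, system, and size limit are assumptions for illustration only:

```nix
derivation {
  name = "structured-attrs-example";
  system = "x86_64-linux";
  # Assumed Bash builder path; a real derivation would normally use a store path.
  builder = "/bin/bash";
  # Source the generated script, then write to the `out` output path, which with
  # structured attributes lives in the `outputs` associative array.
  args = [ "-c" ". \"$NIX_ATTRS_SH_FILE\"; echo ok > \"\${outputs[out]}\"" ];
  __structuredAttrs = true;
  # A nested attribute like this cannot be expressed with plain environment variables.
  outputChecks.out.maxClosureSize = 256 * 1024 * 1024;
}
```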


@ -91,3 +91,7 @@ is a JSON object with the following fields:
* `env`:
The environment passed to the `builder`.
* `structuredAttrs`:
[Structured Attributes](@docroot@/store/derivation/index.md#structured-attrs), only defined if the derivation contains them.
Structured attributes are JSON, and thus embedded as-is.


@ -138,6 +138,17 @@ See [Wikipedia](https://en.wikipedia.org/wiki/Argv) for details.
Environment variables which will be passed to the [builder](#builder) executable.
#### Structured Attributes {#structured-attrs}
Nix also has special support for embedding JSON in derivations: when structured attributes are enabled, the derivation attributes are serialised into a JSON file.
The environment variable `NIX_ATTRS_JSON_FILE` points to the exact location of that file, both in a build and in a [`nix-shell`](@docroot@/command-ref/nix-shell.md).
As a convenience to Bash builders, Nix writes a script that initialises shell variables corresponding to all attributes that are representable in Bash.
The environment variable `NIX_ATTRS_SH_FILE` points to the exact location of the script, both in a build and a [`nix-shell`](@docroot@/command-ref/nix-shell.md).
This includes non-nested (associative) arrays.
For example, the attribute `hardening.format = true` ends up as the Bash associative array element `${hardening[format]}`.
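A minimal sketch of the mapping described above (the builder path is an assumption, and the attribute values are purely illustrative):

```nix
derivation {
  name = "attrs-sh-example";
  system = "x86_64-linux";
  builder = "/bin/bash"; # assumed; associative arrays require Bash
  args = [ (builtins.toFile "builder.sh" ''
    # Defines shell variables and associative arrays for the attributes below.
    . "$NIX_ATTRS_SH_FILE"
    echo "format hardening: ''${hardening[format]}" > "''${outputs[out]}"
  '') ];
  __structuredAttrs = true;
  hardening.format = true;
}
```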
### Placeholders
Placeholders are opaque values used within the [process creation fields] to refer to [store objects] for which we don't yet know [store path]s.
@ -162,7 +173,7 @@ There are two types of placeholder, corresponding to the two cases where this pr
> **Explanation**
>
> In general, we need to realise [realise] a [store object] in order to be sure to have a store object for it.
> In general, we need to [realise] a [store object] in order to be sure to have a store object for it.
> But for these two cases this is either impossible or impractical:
>
> - In the output case this is impossible:
@ -189,7 +200,7 @@ This ensures that there is a canonical [store path] used to refer to the derivat
> **Note**
>
> Currently, the canonical encoding for every derivation is the "ATerm" format,
> but this is subject to change for types derivations which are not yet stable.
> but this is subject to change for the types of derivations which are not yet stable.
Regardless of the format used, when serializing a derivation to a store object, that store object will be content-addressed.
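
Returning to the output-placeholder case described above, a minimal sketch of how such a placeholder is used in practice (builder path and system are assumptions):

```nix
derivation {
  name = "placeholder-example";
  system = "x86_64-linux";
  builder = "/bin/sh"; # assumed path, for illustration only
  # At evaluation time PREFIX is an opaque placeholder string; by the time the
  # builder runs it has been rewritten to the actual path of the `out` output.
  PREFIX = builtins.placeholder "out";
  args = [ "-c" "echo $PREFIX > $out" ];
}
```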


@ -18,14 +18,14 @@ In particular, the edge corresponding to a reference is from the store object th
References other than a self-reference must not form a cycle.
The graph of references excluding self-references thus forms a [directed acyclic graph].
[directed acyclic graph]: @docroot@/glossary.md#gloss-directed acyclic graph
[directed acyclic graph]: @docroot@/glossary.md#gloss-directed-acyclic-graph
We can take the [transitive closure] of the references graph, in which any pair of store objects has an edge not if there is a single reference from the first to the second, but if there is a path of one or more references from the first to the second.
The *requisites* of a store object are all store objects reachable by paths of references which start with the given store object's references.
[transitive closure]: https://en.wikipedia.org/wiki/Transitive_closure
We can also take the [transpose graph] ofthe references graph, where we reverse the orientation of all edges.
We can also take the [transpose graph] of the references graph, where we reverse the orientation of all edges.
The *referrers* of a store object are the store objects that reference it.
[transpose graph]: https://en.wikipedia.org/wiki/Transpose_graph


@ -1,6 +1,11 @@
{
pkgs ? import <nixpkgs> { },
lib ? pkgs.lib,
# Core dependencies
pkgs,
lib,
dockerTools,
runCommand,
buildPackages,
# Image configuration
name ? "nix",
tag ? "latest",
bundleNixpkgs ? true,
@ -14,36 +19,60 @@
gid ? 0,
uname ? "root",
gname ? "root",
Labels ? {
"org.opencontainers.image.title" = "Nix";
"org.opencontainers.image.source" = "https://github.com/NixOS/nix";
"org.opencontainers.image.vendor" = "Nix project";
"org.opencontainers.image.version" = nix.version;
"org.opencontainers.image.description" = "Nix container image";
},
Cmd ? [ (lib.getExe bashInteractive) ],
# Default Packages
nix,
bashInteractive,
coreutils-full,
gnutar,
gzip,
gnugrep,
which,
curl,
less,
wget,
man,
cacert,
findutils,
iana-etc,
gitMinimal,
openssh,
# Other dependencies
shadow,
}:
let
defaultPkgs =
with pkgs;
[
nix
bashInteractive
coreutils-full
gnutar
gzip
gnugrep
which
curl
less
wget
man
cacert.out
findutils
iana-etc
git
openssh
]
++ extraPkgs;
defaultPkgs = [
nix
bashInteractive
coreutils-full
gnutar
gzip
gnugrep
which
curl
less
wget
man
cacert.out
findutils
iana-etc
gitMinimal
openssh
] ++ extraPkgs;
users =
{
root = {
uid = 0;
shell = "${pkgs.bashInteractive}/bin/bash";
shell = lib.getExe bashInteractive;
home = "/root";
gid = 0;
groups = [ "root" ];
@ -52,7 +81,7 @@ let
nobody = {
uid = 65534;
shell = "${pkgs.shadow}/bin/nologin";
shell = lib.getExe' shadow "nologin";
home = "/var/empty";
gid = 65534;
groups = [ "nobody" ];
@ -63,7 +92,7 @@ let
// lib.optionalAttrs (uid != 0) {
"${uname}" = {
uid = uid;
shell = "${pkgs.bashInteractive}/bin/bash";
shell = lib.getExe bashInteractive;
home = "/home/${uname}";
gid = gid;
groups = [ "${gname}" ];
@ -158,15 +187,20 @@ let
baseSystem =
let
nixpkgs = pkgs.path;
channel = pkgs.runCommand "channel-nixos" { inherit bundleNixpkgs; } ''
channel = runCommand "channel-nixos" { inherit bundleNixpkgs; } ''
mkdir $out
if [ "$bundleNixpkgs" ]; then
ln -s ${nixpkgs} $out/nixpkgs
ln -s ${
builtins.path {
path = nixpkgs;
name = "source";
}
} $out/nixpkgs
echo "[]" > $out/manifest.nix
fi
'';
# doc/manual/source/command-ref/files/manifest.nix.md
manifest = pkgs.buildPackages.runCommand "manifest.nix" { } ''
manifest = buildPackages.runCommand "manifest.nix" { } ''
cat > $out <<EOF
[
${lib.concatStringsSep "\n" (
@ -195,7 +229,7 @@ let
]
EOF
'';
profile = pkgs.buildPackages.buildEnv {
profile = buildPackages.buildEnv {
name = "root-profile-env";
paths = defaultPkgs;
@ -212,7 +246,7 @@ let
else
flake-registry;
in
pkgs.runCommand "base-system"
runCommand "base-system"
{
inherit
passwdContents
@ -266,7 +300,6 @@ let
# see doc/manual/source/command-ref/files/profiles.md
ln -s ${profile} $out/nix/var/nix/profiles/default-1-link
ln -s /nix/var/nix/profiles/default-1-link $out/nix/var/nix/profiles/default
ln -s /nix/var/nix/profiles/default $out${userHome}/.nix-profile
# see doc/manual/source/command-ref/files/channels.md
ln -s ${channel} $out/nix/var/nix/profiles/per-user/${uname}/channels-1-link
@ -279,8 +312,8 @@ let
# may get replaced by pkgs.dockerTools.binSh & pkgs.dockerTools.usrBinEnv
mkdir -p $out/bin $out/usr/bin
ln -s ${pkgs.coreutils}/bin/env $out/usr/bin/env
ln -s ${pkgs.bashInteractive}/bin/bash $out/bin/sh
ln -s ${lib.getExe' coreutils-full "env"} $out/usr/bin/env
ln -s ${lib.getExe bashInteractive} $out/bin/sh
''
+ (lib.optionalString (flake-registry-path != null) ''
@ -289,13 +322,13 @@ let
globalFlakeRegistryPath="$nixCacheDir/flake-registry.json"
ln -s ${flake-registry-path} $out$globalFlakeRegistryPath
mkdir -p $out/nix/var/nix/gcroots/auto
rootName=$(${pkgs.nix}/bin/nix --extra-experimental-features nix-command hash file --type sha1 --base32 <(echo -n $globalFlakeRegistryPath))
rootName=$(${lib.getExe' nix "nix"} --extra-experimental-features nix-command hash file --type sha1 --base32 <(echo -n $globalFlakeRegistryPath))
ln -s $globalFlakeRegistryPath $out/nix/var/nix/gcroots/auto/$rootName
'')
);
in
pkgs.dockerTools.buildLayeredImageWithNixDb {
dockerTools.buildLayeredImageWithNixDb {
inherit
name
@ -321,7 +354,7 @@ pkgs.dockerTools.buildLayeredImageWithNixDb {
'';
config = {
Cmd = [ "${userHome}/.nix-profile/bin/bash" ];
inherit Cmd Labels;
User = "${toString uid}:${toString gid}";
Env = [
"USER=${uname}"


@ -404,8 +404,7 @@
dockerImage =
let
pkgs = nixpkgsFor.${system}.native;
image = import ./docker.nix {
inherit pkgs;
image = pkgs.callPackage ./docker.nix {
tag = pkgs.nix.version;
};
in


@ -37,6 +37,118 @@
fi
''}";
};
meson-format = {
enable = true;
files = "(meson.build|meson.options)$";
entry = "${pkgs.writeScript "format-meson" ''
#!${pkgs.runtimeShell}
for file in "$@"; do
${lib.getExe pkgs.meson} format -ic ${../meson.format} "$file"
done
''}";
excludes = [
# We haven't applied formatting to these files yet
''^doc/manual/meson.build$''
''^doc/manual/source/command-ref/meson.build$''
''^doc/manual/source/development/meson.build$''
''^doc/manual/source/language/meson.build$''
''^doc/manual/source/meson.build$''
''^doc/manual/source/release-notes/meson.build$''
''^doc/manual/source/store/meson.build$''
''^misc/bash/meson.build$''
''^misc/fish/meson.build$''
''^misc/launchd/meson.build$''
''^misc/meson.build$''
''^misc/systemd/meson.build$''
''^misc/zsh/meson.build$''
''^nix-meson-build-support/$''
''^nix-meson-build-support/big-objs/meson.build$''
''^nix-meson-build-support/common/meson.build$''
''^nix-meson-build-support/deps-lists/meson.build$''
''^nix-meson-build-support/export/meson.build$''
''^nix-meson-build-support/export-all-symbols/meson.build$''
''^nix-meson-build-support/generate-header/meson.build$''
''^nix-meson-build-support/libatomic/meson.build$''
''^nix-meson-build-support/subprojects/meson.build$''
''^scripts/meson.build$''
''^src/external-api-docs/meson.build$''
''^src/internal-api-docs/meson.build$''
''^src/libcmd/include/nix/cmd/meson.build$''
''^src/libcmd/meson.build$''
''^src/libcmd/nix-meson-build-support$''
''^src/libexpr/include/nix/expr/meson.build$''
''^src/libexpr/meson.build$''
''^src/libexpr/nix-meson-build-support$''
''^src/libexpr-c/meson.build$''
''^src/libexpr-c/nix-meson-build-support$''
''^src/libexpr-test-support/meson.build$''
''^src/libexpr-test-support/nix-meson-build-support$''
''^src/libexpr-tests/meson.build$''
''^src/libexpr-tests/nix-meson-build-support$''
''^src/libfetchers/include/nix/fetchers/meson.build$''
''^src/libfetchers/meson.build$''
''^src/libfetchers/nix-meson-build-support$''
''^src/libfetchers-c/meson.build$''
''^src/libfetchers-c/nix-meson-build-support$''
''^src/libfetchers-tests/meson.build$''
''^src/libfetchers-tests/nix-meson-build-support$''
''^src/libflake/include/nix/flake/meson.build$''
''^src/libflake/meson.build$''
''^src/libflake/nix-meson-build-support$''
''^src/libflake-c/meson.build$''
''^src/libflake-c/nix-meson-build-support$''
''^src/libflake-tests/meson.build$''
''^src/libflake-tests/nix-meson-build-support$''
''^src/libmain/include/nix/main/meson.build$''
''^src/libmain/meson.build$''
''^src/libmain/nix-meson-build-support$''
''^src/libmain-c/meson.build$''
''^src/libmain-c/nix-meson-build-support$''
''^src/libstore/include/nix/store/meson.build$''
''^src/libstore/meson.build$''
''^src/libstore/nix-meson-build-support$''
''^src/libstore/unix/include/nix/store/meson.build$''
''^src/libstore/unix/meson.build$''
''^src/libstore/windows/meson.build$''
''^src/libstore-c/meson.build$''
''^src/libstore-c/nix-meson-build-support$''
''^src/libstore-test-support/include/nix/store/tests/meson.build$''
''^src/libstore-test-support/meson.build$''
''^src/libstore-test-support/nix-meson-build-support$''
''^src/libstore-tests/meson.build$''
''^src/libstore-tests/nix-meson-build-support$''
''^src/libutil/meson.build$''
''^src/libutil/nix-meson-build-support$''
''^src/libutil/unix/include/nix/util/meson.build$''
''^src/libutil/unix/meson.build$''
''^src/libutil/windows/meson.build$''
''^src/libutil-c/meson.build$''
''^src/libutil-c/nix-meson-build-support$''
''^src/libutil-test-support/include/nix/util/tests/meson.build$''
''^src/libutil-test-support/meson.build$''
''^src/libutil-test-support/nix-meson-build-support$''
''^src/libutil-tests/meson.build$''
''^src/libutil-tests/nix-meson-build-support$''
''^src/nix/meson.build$''
''^src/nix/nix-meson-build-support$''
''^src/perl/lib/Nix/meson.build$''
''^src/perl/meson.build$''
''^tests/functional/ca/meson.build$''
''^tests/functional/common/meson.build$''
''^tests/functional/dyn-drv/meson.build$''
''^tests/functional/flakes/meson.build$''
''^tests/functional/git-hashing/meson.build$''
''^tests/functional/local-overlay-store/meson.build$''
''^tests/functional/meson.build$''
''^src/libcmd/meson.options$''
''^src/libexpr/meson.options$''
''^src/libstore/meson.options$''
''^src/libutil/meson.options$''
''^src/libutil-c/meson.options$''
''^src/nix/meson.options$''
''^src/perl/meson.options$''
];
};
nixfmt-rfc-style = {
enable = true;
excludes = [


@ -1,13 +1,16 @@
# This is just a stub project to include all the others as subprojects
# for development shell purposes
project('nix-dev-shell', 'cpp',
project(
'nix-dev-shell',
'cpp',
version : files('.version'),
subproject_dir : 'src',
default_options : [
'localstatedir=/nix/var',
# hack for trailing newline
],
meson_version : '>= 1.1'
meson_version : '>= 1.1',
)
# Internal Libraries

meson.format Normal file

@ -0,0 +1,7 @@
indent_by = ' '
space_array = true
kwargs_force_multiline = false
wide_colon = true
group_arg_value = true
indent_before_comments = ' '
use_editor_config = true


@ -1,13 +1,22 @@
# vim: filetype=meson
option('doc-gen', type : 'boolean', value : false,
option(
'doc-gen',
type : 'boolean',
value : false,
description : 'Generate documentation',
)
option('unit-tests', type : 'boolean', value : true,
option(
'unit-tests',
type : 'boolean',
value : true,
description : 'Build unit tests',
)
option('bindings', type : 'boolean', value : true,
option(
'bindings',
type : 'boolean',
value : true,
description : 'Build language bindings (e.g. Perl)',
)


@ -1 +1,2 @@
d @localstatedir@/nix/daemon-socket 0755 root root - -
d @localstatedir@/nix/daemon-socket 0755 root root - -
d @localstatedir@/nix/builds 0755 root root 7d -


@ -2,5 +2,5 @@ if host_machine.system() == 'windows'
# https://learn.microsoft.com/en-us/cpp/porting/modifying-winver-and-win32-winnt?view=msvc-170
# #define _WIN32_WINNT_WIN8 0x0602
# We currently don't use any API which requires higher than this.
add_project_arguments([ '-D_WIN32_WINNT=0x0602' ], language: 'cpp')
add_project_arguments([ '-D_WIN32_WINNT=0x0602' ], language : 'cpp')
endif


@ -5,6 +5,7 @@
#include "nix/util/canon-path.hh"
#include "nix/main/common-args.hh"
#include "nix/expr/search-path.hh"
#include "nix/expr/eval-settings.hh"
#include <filesystem>
@ -15,10 +16,8 @@ class Store;
namespace fetchers { struct Settings; }
class EvalState;
struct EvalSettings;
struct CompatibilitySettings;
class Bindings;
struct SourcePath;
namespace flake { struct Settings; }


@ -199,7 +199,7 @@ SourceExprCommand::SourceExprCommand()
.shortName = 'f',
.description =
"Interpret [*installables*](@docroot@/command-ref/new-cli/nix.md#installables) as attribute paths relative to the Nix expression stored in *file*. "
"If *file* is the character -, then a Nix expression will be read from standard input. "
"If *file* is the character -, then a Nix expression is read from standard input. "
"Implies `--impure`.",
.category = installablesCategory,
.labels = {"file"},


@ -69,6 +69,7 @@ struct NixRepl
const static int envSize = 32768;
std::shared_ptr<StaticEnv> staticEnv;
Value lastLoaded;
Env * env;
int displ;
StringSet varNames;
@ -95,6 +96,7 @@ struct NixRepl
void loadFiles();
void loadFlakes();
void reloadFilesAndFlakes();
void showLastLoaded();
void addAttrsToScope(Value & attrs);
void addVarToScope(const Symbol name, Value & v);
Expr * parseString(std::string s);
@ -158,6 +160,8 @@ static std::ostream & showDebugTrace(std::ostream & out, const PosTable & positi
return out;
}
MakeError(IncompleteReplExpr, ParseError);
static bool isFirstRepl = true;
ReplExitStatus NixRepl::mainLoop()
@ -205,16 +209,8 @@ ReplExitStatus NixRepl::mainLoop()
default:
unreachable();
}
} catch (ParseError & e) {
if (e.msg().find("unexpected end of file") != std::string::npos) {
// For parse errors on incomplete input, we continue waiting for the next line of
// input without clearing the input so far.
continue;
} else {
printMsg(lvlError, e.msg());
}
} catch (EvalError & e) {
printMsg(lvlError, e.msg());
} catch (IncompleteReplExpr &) {
continue;
} catch (Error & e) {
printMsg(lvlError, e.msg());
} catch (Interrupted & e) {
@ -378,6 +374,7 @@ ProcessLineResult NixRepl::processLine(std::string line)
<< " current profile\n"
<< " :l, :load <path> Load Nix expression and add it to scope\n"
<< " :lf, :load-flake <ref> Load Nix flake and add it to scope\n"
<< " :ll, :last-loaded Show most recently loaded variables added to scope\n"
<< " :p, :print <expr> Evaluate and print expression recursively\n"
<< " Strings are printed directly, without escaping.\n"
<< " :q, :quit Exit nix-repl\n"
@ -468,6 +465,10 @@ ProcessLineResult NixRepl::processLine(std::string line)
loadFlake(arg);
}
else if (command == ":ll" || command == ":last-loaded") {
showLastLoaded();
}
else if (command == ":r" || command == ":reload") {
state->resetFileCache();
reloadFilesAndFlakes();
@ -483,7 +484,7 @@ ProcessLineResult NixRepl::processLine(std::string line)
auto path = state->coerceToPath(noPos, v, context, "while evaluating the filename to edit");
return {path, 0};
} else if (v.isLambda()) {
auto pos = state->positions[v.payload.lambda.fun->pos];
auto pos = state->positions[v.lambda().fun->pos];
if (auto path = std::get_if<SourcePath>(&pos.origin))
return {*path, pos.line};
else
@ -760,6 +761,16 @@ void NixRepl::initEnv()
varNames.emplace(state->symbols[i.first]);
}
void NixRepl::showLastLoaded()
{
RunPager pager;
for (auto & i : *lastLoaded.attrs()) {
std::string_view name = state->symbols[i.name];
logger->cout(name);
}
}
void NixRepl::reloadFilesAndFlakes()
{
@ -813,6 +824,27 @@ void NixRepl::addAttrsToScope(Value & attrs)
staticEnv->sort();
staticEnv->deduplicate();
notice("Added %1% variables.", attrs.attrs()->size());
lastLoaded = attrs;
const int max_print = 20;
int counter = 0;
std::ostringstream loaded;
for (auto & i : attrs.attrs()->lexicographicOrder(state->symbols)) {
if (counter >= max_print)
break;
if (counter > 0)
loaded << ", ";
printIdentifier(loaded, state->symbols[i->name]);
counter += 1;
}
notice("%1%", loaded.str());
if (attrs.attrs()->size() > max_print)
notice("... and %1% more; view with :ll", attrs.attrs()->size() - max_print);
}
@ -837,7 +869,17 @@ Expr * NixRepl::parseString(std::string s)
void NixRepl::evalString(std::string s, Value & v)
{
Expr * e = parseString(s);
Expr * e;
try {
e = parseString(s);
} catch (ParseError & e) {
if (e.msg().find("unexpected end of file") != std::string::npos)
// For parse errors on incomplete input, we continue waiting for the next line of
// input without clearing the input so far.
throw IncompleteReplExpr(e.msg());
else
throw;
}
e->eval(*state, *env, v);
state->forceValue(v, v.determinePos(noPos));
}


@ -252,7 +252,7 @@ const char * nix_get_path_string(nix_c_context * context, const nix_value * valu
// We could use v.path().to_string().c_str(), but I'm concerned this
// crashes. Looks like .path() allocates a CanonPath with a copy of the
// string, then it gets the underlying data from that.
return v.payload.path.path;
return v.pathStr();
}
NIXC_CATCH_ERRS_NULL
}


@ -1,9 +1,10 @@
# Public headers directory
include_dirs = [include_directories('../../..')]
include_dirs = [ include_directories('../../..') ]
headers = files(
'libexpr.hh',
'nix_api_expr.hh',
'value/context.hh',
# hack for trailing newline
)


@ -667,8 +667,8 @@ namespace nix {
auto v = eval("derivation");
ASSERT_EQ(v.type(), nFunction);
ASSERT_TRUE(v.isLambda());
ASSERT_NE(v.payload.lambda.fun, nullptr);
ASSERT_TRUE(v.payload.lambda.fun->hasFormals());
ASSERT_NE(v.lambda().fun, nullptr);
ASSERT_TRUE(v.lambda().fun->hasFormals());
}
TEST_F(PrimOpTest, currentTime) {


@ -204,7 +204,7 @@ FrameInfo SampleStack::getFrameInfoFromValueAndPos(const Value & v, std::span<Va
/* NOTE: No actual references to garbage collected values are held in
the profiler. */
if (v.isLambda())
return LambdaFrameInfo{.expr = v.payload.lambda.fun, .callPos = pos};
return LambdaFrameInfo{.expr = v.lambda().fun, .callPos = pos};
else if (v.isPrimOp()) {
return getPrimOpFrameInfo(*v.primOp(), args, pos);
} else if (v.isPrimOpApp())


@ -2,6 +2,7 @@
#include "nix/expr/eval-settings.hh"
#include "nix/expr/primops.hh"
#include "nix/expr/print-options.hh"
#include "nix/expr/symbol-table.hh"
#include "nix/util/exit.hh"
#include "nix/util/types.hh"
#include "nix/util/util.hh"
@ -89,6 +90,11 @@ std::string printValue(EvalState & state, Value & v)
return out.str();
}
Value * Value::toPtr(SymbolStr str) noexcept
{
return const_cast<Value *>(str.valuePtr());
}
void Value::print(EvalState & state, std::ostream & str, PrintOptions options)
{
printValue(state, str, *this, options);
@ -120,9 +126,9 @@ std::string showType(const Value & v)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wswitch-enum"
switch (v.internalType) {
case tString: return v.payload.string.context ? "a string with context" : "a string";
case tString: return v.context() ? "a string with context" : "a string";
case tPrimOp:
return fmt("the built-in function '%s'", std::string(v.payload.primOp->name));
return fmt("the built-in function '%s'", std::string(v.primOp()->name));
case tPrimOpApp:
return fmt("the partially applied built-in function '%s'", v.primOpAppPrimOp()->name);
case tExternal: return v.external()->showType();
@ -141,8 +147,8 @@ PosIdx Value::determinePos(const PosIdx pos) const
#pragma GCC diagnostic ignored "-Wswitch-enum"
switch (internalType) {
case tAttrs: return attrs()->pos;
case tLambda: return payload.lambda.fun->pos;
case tApp: return payload.app.left->determinePos(pos);
case tLambda: return lambda().fun->pos;
case tApp: return app().left->determinePos(pos);
default: return pos;
}
#pragma GCC diagnostic pop
@ -154,10 +160,10 @@ bool Value::isTrivial() const
internalType != tApp
&& internalType != tPrimOpApp
&& (internalType != tThunk
|| (dynamic_cast<ExprAttrs *>(payload.thunk.expr)
&& ((ExprAttrs *) payload.thunk.expr)->dynamicAttrs.empty())
|| dynamic_cast<ExprLambda *>(payload.thunk.expr)
|| dynamic_cast<ExprList *>(payload.thunk.expr));
|| (dynamic_cast<ExprAttrs *>(thunk().expr)
&& ((ExprAttrs *) thunk().expr)->dynamicAttrs.empty())
|| dynamic_cast<ExprLambda *>(thunk().expr)
|| dynamic_cast<ExprList *>(thunk().expr));
}
@ -519,9 +525,9 @@ std::ostream & operator<<(std::ostream & output, const PrimOp & primOp)
const PrimOp * Value::primOpAppPrimOp() const
{
Value * left = payload.primOpApp.left;
Value * left = primOpApp().left;
while (left && !left->isPrimOp()) {
left = left->payload.primOpApp.left;
left = left->primOpApp().left;
}
if (!left)
@ -604,7 +610,7 @@ std::optional<EvalState::Doc> EvalState::getDoc(Value & v)
};
}
if (v.isLambda()) {
auto exprLambda = v.payload.lambda.fun;
auto exprLambda = v.lambda().fun;
std::ostringstream s;
std::string name;
@ -1561,13 +1567,13 @@ void EvalState::callFunction(Value & fun, std::span<Value *> args, Value & vRes,
if (vCur.isLambda()) {
ExprLambda & lambda(*vCur.payload.lambda.fun);
ExprLambda & lambda(*vCur.lambda().fun);
auto size =
(!lambda.arg ? 0 : 1) +
(lambda.hasFormals() ? lambda.formals->formals.size() : 0);
Env & env2(allocEnv(size));
env2.up = vCur.payload.lambda.env;
env2.up = vCur.lambda().env;
Displacement displ = 0;
@ -1597,7 +1603,7 @@ void EvalState::callFunction(Value & fun, std::span<Value *> args, Value & vRes,
symbols[i.name])
.atPos(lambda.pos)
.withTrace(pos, "from call site")
.withFrame(*fun.payload.lambda.env, lambda)
.withFrame(*fun.lambda().env, lambda)
.debugThrow();
}
env2.values[displ++] = i.def->maybeThunk(*this, env2);
@ -1624,7 +1630,7 @@ void EvalState::callFunction(Value & fun, std::span<Value *> args, Value & vRes,
.atPos(lambda.pos)
.withTrace(pos, "from call site")
.withSuggestions(suggestions)
.withFrame(*fun.payload.lambda.env, lambda)
.withFrame(*fun.lambda().env, lambda)
.debugThrow();
}
unreachable();
@ -1696,7 +1702,7 @@ void EvalState::callFunction(Value & fun, std::span<Value *> args, Value & vRes,
Value * primOp = &vCur;
while (primOp->isPrimOpApp()) {
argsDone++;
primOp = primOp->payload.primOpApp.left;
primOp = primOp->primOpApp().left;
}
assert(primOp->isPrimOp());
auto arity = primOp->primOp()->arity;
@ -1712,8 +1718,8 @@ void EvalState::callFunction(Value & fun, std::span<Value *> args, Value & vRes,
Value * vArgs[maxPrimOpArity];
auto n = argsDone;
for (Value * arg = &vCur; arg->isPrimOpApp(); arg = arg->payload.primOpApp.left)
vArgs[--n] = arg->payload.primOpApp.right;
for (Value * arg = &vCur; arg->isPrimOpApp(); arg = arg->primOpApp().left)
vArgs[--n] = arg->primOpApp().right;
for (size_t i = 0; i < argsLeft; ++i)
vArgs[argsDone + i] = args[i];
@ -1819,14 +1825,14 @@ void EvalState::autoCallFunction(const Bindings & args, Value & fun, Value & res
}
}
if (!fun.isLambda() || !fun.payload.lambda.fun->hasFormals()) {
if (!fun.isLambda() || !fun.lambda().fun->hasFormals()) {
res = fun;
return;
}
auto attrs = buildBindings(std::max(static_cast<uint32_t>(fun.payload.lambda.fun->formals->formals.size()), args.size()));
auto attrs = buildBindings(std::max(static_cast<uint32_t>(fun.lambda().fun->formals->formals.size()), args.size()));
if (fun.payload.lambda.fun->formals->ellipsis) {
if (fun.lambda().fun->formals->ellipsis) {
// If the formals have an ellipsis (eg the function accepts extra args) pass
// all available automatic arguments (which includes arguments specified on
// the command line via --arg/--argstr)
@ -1834,7 +1840,7 @@ void EvalState::autoCallFunction(const Bindings & args, Value & fun, Value & res
attrs.insert(v);
} else {
// Otherwise, only pass the arguments that the function accepts
for (auto & i : fun.payload.lambda.fun->formals->formals) {
for (auto & i : fun.lambda().fun->formals->formals) {
auto j = args.get(i.name);
if (j) {
attrs.insert(*j);
@ -1844,7 +1850,7 @@ Nix attempted to evaluate a function as a top level expression; in
this case it must have its arguments supplied either by default
values, or passed explicitly with '--arg' or '--argstr'. See
https://nixos.org/manual/nix/stable/language/constructs.html#functions.)", symbols[i.name])
.atPos(i.pos).withFrame(*fun.payload.lambda.env, *fun.payload.lambda.fun).debugThrow();
.atPos(i.pos).withFrame(*fun.lambda().env, *fun.lambda().fun).debugThrow();
}
}
}
@ -2157,7 +2163,7 @@ void EvalState::forceValueDeep(Value & v)
try {
// If the value is a thunk, we're evaling. Otherwise no trace necessary.
auto dts = debugRepl && i.value->isThunk()
? makeDebugTraceStacker(*this, *i.value->payload.thunk.expr, *i.value->payload.thunk.env, i.pos,
? makeDebugTraceStacker(*this, *i.value->thunk().expr, *i.value->thunk().env, i.pos,
"while evaluating the attribute '%1%'", symbols[i.name])
: nullptr;
@ -2291,8 +2297,8 @@ std::string_view EvalState::forceString(Value & v, const PosIdx pos, std::string
void copyContext(const Value & v, NixStringContext & context, const ExperimentalFeatureSettings & xpSettings)
{
if (v.payload.string.context)
for (const char * * p = v.payload.string.context; *p; ++p)
if (v.context())
for (const char * * p = v.context(); *p; ++p)
context.insert(NixStringContextElem::parse(*p, xpSettings));
}
@ -2362,7 +2368,7 @@ BackedStringView EvalState::coerceToString(
!canonicalizePath && !copyToStore
? // FIXME: hack to preserve path literals that end in a
// slash, as in /foo/${x}.
v.payload.path.path
v.pathStr()
: copyToStore
? store->printStorePath(copyPathToStore(context, v.path()))
: std::string(v.path().path.abs());
@ -2630,14 +2636,14 @@ void EvalState::assertEqValues(Value & v1, Value & v2, const PosIdx pos, std::st
return;
case nPath:
if (v1.payload.path.accessor != v2.payload.path.accessor) {
if (v1.pathAccessor() != v2.pathAccessor()) {
error<AssertionError>(
"path '%s' is not equal to path '%s' because their accessors are different",
ValuePrinter(*this, v1, errorPrintOptions),
ValuePrinter(*this, v2, errorPrintOptions))
.debugThrow();
}
if (strcmp(v1.payload.path.path, v2.payload.path.path) != 0) {
if (strcmp(v1.pathStr(), v2.pathStr()) != 0) {
error<AssertionError>(
"path '%s' is not equal to path '%s'",
ValuePrinter(*this, v1, errorPrintOptions),
@ -2804,8 +2810,8 @@ bool EvalState::eqValues(Value & v1, Value & v2, const PosIdx pos, std::string_v
case nPath:
return
// FIXME: compare accessors by their fingerprint.
v1.payload.path.accessor == v2.payload.path.accessor
&& strcmp(v1.payload.path.path, v2.payload.path.path) == 0;
v1.pathAccessor() == v2.pathAccessor()
&& strcmp(v1.pathStr(), v2.pathStr()) == 0;
case nNull:
return true;
@ -3013,7 +3019,7 @@ void EvalState::printStatistics()
// XXX: overrides earlier assignment
topObj["symbols"] = json::array();
auto &list = topObj["symbols"];
symbols.dump([&](const std::string & s) { list.emplace_back(s); });
symbols.dump([&](std::string_view s) { list.emplace_back(s); });
}
if (outPath == "-") {
std::cerr << topObj.dump(2) << std::endl;


@ -89,9 +89,9 @@ Env & EvalState::allocEnv(size_t size)
void EvalState::forceValue(Value & v, const PosIdx pos)
{
if (v.isThunk()) {
Env * env = v.payload.thunk.env;
Env * env = v.thunk().env;
assert(env || v.isBlackhole());
Expr * expr = v.payload.thunk.expr;
Expr * expr = v.thunk().expr;
try {
v.mkBlackhole();
//checkInterrupt();
@ -106,7 +106,7 @@ void EvalState::forceValue(Value & v, const PosIdx pos)
}
}
else if (v.isApp())
callFunction(*v.payload.app.left, *v.payload.app.right, v, pos);
callFunction(*v.app().left, *v.app().right, v, pos);
}


@ -132,7 +132,7 @@ struct EvalSettings : Config
Setting<bool> restrictEval{
this, false, "restrict-eval",
R"(
If set to `true`, the Nix evaluator will not allow access to any
If set to `true`, the Nix evaluator doesn't allow access to any
files outside of
[`builtins.nixPath`](@docroot@/language/builtins.md#builtins-nixPath),
or to URIs outside of
@ -157,7 +157,7 @@ struct EvalSettings : Config
R"(
By default, Nix allows [Import from Derivation](@docroot@/language/import-from-derivation.md).
When this setting is `true`, Nix will log a warning indicating that it performed such an import.
When this setting is `true`, Nix logs a warning indicating that it performed such an import.
This option has no effect if `allow-import-from-derivation` is disabled.
)"
};
@ -167,9 +167,9 @@ struct EvalSettings : Config
R"(
By default, Nix allows [Import from Derivation](@docroot@/language/import-from-derivation.md).
With this option set to `false`, Nix will throw an error when evaluating an expression that uses this feature,
With this option set to `false`, Nix throws an error when evaluating an expression that uses this feature,
even when the required store object is readily available.
This ensures that evaluation will not require any builds to take place,
This ensures that evaluation doesn't require any builds to take place,
regardless of the state of the store.
)"};
@ -188,8 +188,8 @@ struct EvalSettings : Config
Setting<bool> traceFunctionCalls{this, false, "trace-function-calls",
R"(
If set to `true`, the Nix evaluator will trace every function call.
Nix will print a log message at the "vomit" level for every function
If set to `true`, the Nix evaluator traces every function call.
Nix prints a log message at the "vomit" level for every function
entrance and function exit.
function-trace entered undefined position at 1565795816999559622
@ -234,8 +234,8 @@ struct EvalSettings : Config
Setting<bool> ignoreExceptionsDuringTry{this, false, "ignore-try",
R"(
If set to true, ignore exceptions inside 'tryEval' calls when evaluating nix expressions in
debug mode (using the --debugger flag). By default the debugger will pause on all exceptions.
If set to true, ignore exceptions inside 'tryEval' calls when evaluating Nix expressions in
debug mode (using the --debugger flag). By default the debugger pauses on all exceptions.
)"};
Setting<bool> traceVerbose{this, false, "trace-verbose",
@ -247,7 +247,7 @@ struct EvalSettings : Config
Setting<bool> builtinsTraceDebugger{this, false, "debugger-on-trace",
R"(
If set to true and the `--debugger` flag is given, the following functions
will enter the debugger like [`builtins.break`](@docroot@/language/builtins.md#builtins-break).
enter the debugger like [`builtins.break`](@docroot@/language/builtins.md#builtins-break):
* [`builtins.trace`](@docroot@/language/builtins.md#builtins-trace)
* [`builtins.traceVerbose`](@docroot@/language/builtins.md#builtins-traceVerbose)
@ -269,7 +269,7 @@ struct EvalSettings : Config
Setting<bool> builtinsAbortOnWarn{this, false, "abort-on-warn",
R"(
If set to true, [`builtins.warn`](@docroot@/language/builtins.md#builtins-warn) will throw an error when logging a warning.
If set to true, [`builtins.warn`](@docroot@/language/builtins.md#builtins-warn) throws an error when logging a warning.
This will give you a stack trace that leads to the location of the warning.


@ -1,6 +1,7 @@
#pragma once
#include "nix/expr/value.hh"
#include "nix/expr/symbol-table.hh"
namespace nix {


@ -1,51 +1,35 @@
#pragma once
///@file
#include <list>
#include <map>
#include <unordered_map>
#include "nix/util/types.hh"
#include <memory_resource>
#include "nix/expr/value.hh"
#include "nix/util/chunked-vector.hh"
#include "nix/util/error.hh"
#include <boost/version.hpp>
#define USE_FLAT_SYMBOL_SET (BOOST_VERSION >= 108100)
#if USE_FLAT_SYMBOL_SET
# include <boost/unordered/unordered_flat_set.hpp>
#else
# include <boost/unordered/unordered_set.hpp>
#endif
namespace nix {
/**
* This class mainly exists to give us an operator<< for ostreams. We could also
* return plain strings from SymbolTable, but then we'd have to wrap every
* instance of a symbol that is fmt()ed, which is inconvenient and error-prone.
*/
class SymbolStr
class SymbolValue : protected Value
{
friend class SymbolStr;
friend class SymbolTable;
private:
const std::string * s;
uint32_t size_;
uint32_t idx;
explicit SymbolStr(const std::string & symbol): s(&symbol) {}
SymbolValue() = default;
public:
bool operator == (std::string_view s2) const
operator std::string_view() const noexcept
{
return *s == s2;
}
const char * c_str() const
{
return s->c_str();
}
operator const std::string_view () const
{
return *s;
}
friend std::ostream & operator <<(std::ostream & os, const SymbolStr & symbol);
bool empty() const
{
return s->empty();
return {c_str(), size_};
}
};
@ -56,24 +40,161 @@ public:
*/
class Symbol
{
friend class SymbolStr;
friend class SymbolTable;
private:
uint32_t id;
explicit Symbol(uint32_t id): id(id) {}
explicit Symbol(uint32_t id) noexcept : id(id) {}
public:
Symbol() : id(0) {}
Symbol() noexcept : id(0) {}
explicit operator bool() const { return id > 0; }
[[gnu::always_inline]]
explicit operator bool() const noexcept { return id > 0; }
auto operator<=>(const Symbol other) const { return id <=> other.id; }
bool operator==(const Symbol other) const { return id == other.id; }
auto operator<=>(const Symbol other) const noexcept { return id <=> other.id; }
bool operator==(const Symbol other) const noexcept { return id == other.id; }
friend class std::hash<Symbol>;
};
/**
* This class mainly exists to give us an operator<< for ostreams. We could also
* return plain strings from SymbolTable, but then we'd have to wrap every
* instance of a symbol that is fmt()ed, which is inconvenient and error-prone.
*/
class SymbolStr
{
friend class SymbolTable;
constexpr static size_t chunkSize{8192};
using SymbolValueStore = ChunkedVector<SymbolValue, chunkSize>;
const SymbolValue * s;
struct Key
{
using HashType = boost::hash<std::string_view>;
SymbolValueStore & store;
std::string_view s;
std::size_t hash;
std::pmr::polymorphic_allocator<char> & alloc;
Key(SymbolValueStore & store, std::string_view s, std::pmr::polymorphic_allocator<char> & stringAlloc)
: store(store)
, s(s)
, hash(HashType{}(s))
, alloc(stringAlloc) {}
};
public:
SymbolStr(const SymbolValue & s) noexcept : s(&s) {}
SymbolStr(const Key & key)
{
auto size = key.s.size();
if (size >= std::numeric_limits<uint32_t>::max()) {
throw Error("Size of symbol exceeds 4GiB and cannot be stored");
}
// for multi-threaded implementations: lock store and allocator here
const auto & [v, idx] = key.store.add(SymbolValue{});
if (size == 0) {
v.mkString("", nullptr);
} else {
auto s = key.alloc.allocate(size + 1);
memcpy(s, key.s.data(), size);
s[size] = '\0';
v.mkString(s, nullptr);
}
v.size_ = size;
v.idx = idx;
this->s = &v;
}
bool operator == (std::string_view s2) const noexcept
{
return *s == s2;
}
[[gnu::always_inline]]
const char * c_str() const noexcept
{
return s->c_str();
}
[[gnu::always_inline]]
operator std::string_view () const noexcept
{
return *s;
}
friend std::ostream & operator <<(std::ostream & os, const SymbolStr & symbol);
[[gnu::always_inline]]
bool empty() const noexcept
{
return s->size_ == 0;
}
[[gnu::always_inline]]
size_t size() const noexcept
{
return s->size_;
}
[[gnu::always_inline]]
const Value * valuePtr() const noexcept
{
return s;
}
explicit operator Symbol() const noexcept
{
return Symbol{s->idx + 1};
}
struct Hash
{
using is_transparent = void;
using is_avalanching = std::true_type;
std::size_t operator()(SymbolStr str) const
{
return Key::HashType{}(*str.s);
}
std::size_t operator()(const Key & key) const noexcept
{
return key.hash;
}
};
struct Equal
{
using is_transparent = void;
bool operator()(SymbolStr a, SymbolStr b) const noexcept
{
// strings are unique, so that a pointer comparison is OK
return a.s == b.s;
}
bool operator()(SymbolStr a, const Key & b) const noexcept
{
return a == b.s;
}
[[gnu::always_inline]]
bool operator()(const Key & a, SymbolStr b) const noexcept
{
return operator()(b, a);
}
};
};
/**
* Symbol table used by the parser and evaluator to represent and look
* up identifiers and attributes efficiently.
@ -82,29 +203,46 @@ class SymbolTable
{
private:
/**
* Map from string view (backed by ChunkedVector) -> offset into the store.
* SymbolTable is an append only data structure.
* During its lifetime the monotonic buffer holds all strings and nodes, if the symbol set is node based.
*/
std::pmr::monotonic_buffer_resource buffer;
std::pmr::polymorphic_allocator<char> stringAlloc{&buffer};
SymbolStr::SymbolValueStore store{16};
/**
* Transparent lookup of string view for a pointer to a ChunkedVector entry -> return offset into the store.
* ChunkedVector references are never invalidated.
*/
std::unordered_map<std::string_view, uint32_t> symbols;
ChunkedVector<std::string, 8192> store{16};
#if USE_FLAT_SYMBOL_SET
boost::unordered_flat_set<SymbolStr, SymbolStr::Hash, SymbolStr::Equal> symbols{SymbolStr::chunkSize};
#else
using SymbolValueAlloc = std::pmr::polymorphic_allocator<SymbolStr>;
boost::unordered_set<SymbolStr, SymbolStr::Hash, SymbolStr::Equal, SymbolValueAlloc> symbols{SymbolStr::chunkSize, {&buffer}};
#endif
public:
/**
* Converts a string into a symbol.
*/
Symbol create(std::string_view s)
{
Symbol create(std::string_view s) {
// Most symbols are looked up more than once, so we trade off insertion performance
// for lookup performance.
// FIXME: make this thread-safe.
auto it = symbols.find(s);
if (it != symbols.end())
return Symbol(it->second + 1);
return [&]<typename T>(T && key) -> Symbol {
if constexpr (requires { symbols.insert<T>(key); }) {
auto [it, _] = symbols.insert<T>(key);
return Symbol(*it);
} else {
auto it = symbols.find<T>(key);
if (it != symbols.end())
return Symbol(*it);
const auto & [rawSym, idx] = store.add(s);
symbols.emplace(rawSym, idx);
return Symbol(idx + 1);
it = symbols.emplace(key).first;
return Symbol(*it);
}
}(SymbolStr::Key{store, s, stringAlloc});
}
std::vector<SymbolStr> resolve(const std::vector<Symbol> & symbols) const
@ -118,12 +256,14 @@ public:
SymbolStr operator[](Symbol s) const
{
if (s.id == 0 || s.id > store.size())
uint32_t idx = s.id - uint32_t(1);
if (idx >= store.size())
unreachable();
return SymbolStr(store[s.id - 1]);
return store[idx];
}
size_t size() const
[[gnu::always_inline]]
size_t size() const noexcept
{
return store.size();
}
@ -147,3 +287,5 @@ struct std::hash<nix::Symbol>
return std::hash<decltype(s.id)>{}(s.id);
}
};
#undef USE_FLAT_SYMBOL_SET


@ -5,7 +5,6 @@
#include <span>
#include "nix/expr/eval-gc.hh"
#include "nix/expr/symbol-table.hh"
#include "nix/expr/value/context.hh"
#include "nix/util/source-path.hh"
#include "nix/expr/print-options.hh"
@ -65,6 +64,7 @@ struct ExprLambda;
struct ExprBlackHole;
struct PrimOp;
class Symbol;
class SymbolStr;
class PosIdx;
struct Pos;
class StorePath;
@ -172,6 +172,11 @@ private:
public:
/**
* Never modify the backing `Value` object!
*/
static Value * toPtr(SymbolStr str) noexcept;
void print(EvalState &state, std::ostream &str, PrintOptions options = PrintOptions {});
// Functions needed to distinguish the type
@ -331,11 +336,6 @@ public:
void mkStringMove(const char * s, const NixStringContext & context);
inline void mkString(const SymbolStr & s)
{
mkString(s.c_str());
}
void mkPath(const SourcePath & path);
void mkPath(std::string_view path);
@ -410,11 +410,6 @@ public:
return internalType == tList1 || internalType == tList2 || internalType == tListN;
}
Value * const * listElems()
{
return internalType == tList1 || internalType == tList2 ? payload.smallList : payload.bigList.elems;
}
std::span<Value * const> listItems() const
{
assert(isList());
@ -444,8 +439,8 @@ public:
{
assert(internalType == tPath);
return SourcePath(
ref(payload.path.accessor->shared_from_this()),
CanonPath(CanonPath::unchecked_t(), payload.path.path));
ref(pathAccessor()->shared_from_this()),
CanonPath(CanonPath::unchecked_t(), pathStr()));
}
std::string_view string_view() const
@ -482,6 +477,24 @@ public:
NixFloat fpoint() const
{ return payload.fpoint; }
Lambda lambda() const
{ return payload.lambda; }
ClosureThunk thunk() const
{ return payload.thunk; }
FunctionApplicationThunk primOpApp() const
{ return payload.primOpApp; }
FunctionApplicationThunk app() const
{ return payload.app; }
const char * pathStr() const
{ return payload.path.path; }
SourceAccessor * pathAccessor() const
{ return payload.path.accessor; }
};
@ -489,7 +502,7 @@ extern ExprBlackHole eBlackHole;
bool Value::isBlackhole() const
{
return internalType == tThunk && payload.thunk.expr == (Expr*) &eBlackHole;
return internalType == tThunk && thunk().expr == (Expr*) &eBlackHole;
}
void Value::mkBlackhole()


@ -606,7 +606,7 @@ void ExprLambda::setDocComment(DocComment docComment) {
size_t SymbolTable::totalSize() const
{
size_t n = 0;
dump([&] (const std::string & s) { n += s.size(); });
dump([&] (SymbolStr s) { n += s.size(); });
return n;
}


@ -14,6 +14,7 @@
#include "nix/expr/value-to-xml.hh"
#include "nix/expr/primops.hh"
#include "nix/fetchers/fetch-to-store.hh"
#include "nix/util/sort.hh"
#include <boost/container/small_vector.hpp>
#include <nlohmann/json.hpp>
@ -345,7 +346,7 @@ static RegisterPrimOp primop_import({
> }
> ```
>
> then the following `foo.nix` will give an error:
> then the following `foo.nix` throws an error:
>
> ```nix
> # foo.nix
@ -651,7 +652,7 @@ struct CompareValues
// Note: we don't take the accessor into account
// since it's not obvious how to compare them in a
// reproducible way.
return strcmp(v1->payload.path.path, v2->payload.path.path) < 0;
return strcmp(v1->pathStr(), v2->pathStr()) < 0;
case nList:
// Lexicographic comparison
for (size_t i = 0;; i++) {
@ -915,7 +916,7 @@ static RegisterPrimOp primop_ceil({
a NixInt and if `*number* < -9007199254740992` or `*number* > 9007199254740992`.
If the datatype of *number* is neither a NixInt (signed 64-bit integer) nor a NixFloat
(IEEE-754 double-precision floating-point number), an evaluation error will be thrown.
(IEEE-754 double-precision floating-point number), an evaluation error is thrown.
)",
.fun = prim_ceil,
});
@ -1002,15 +1003,15 @@ static RegisterPrimOp primop_tryEval({
Try to shallowly evaluate *e*. Return a set containing the
attributes `success` (`true` if *e* evaluated successfully,
`false` if an error was thrown) and `value`, equalling *e* if
successful and `false` otherwise. `tryEval` will only prevent
successful and `false` otherwise. `tryEval` only prevents
errors created by `throw` or `assert` from being thrown.
Errors `tryEval` will not catch are for example those created
Errors `tryEval` doesn't catch are, for example, those created
by `abort` and type errors generated by builtins. Also note that
this doesn't evaluate *e* deeply, so `let e = { x = throw ""; };
in (builtins.tryEval e).success` will be `true`. Using
in (builtins.tryEval e).success` is `true`. Using
`builtins.deepSeq` one can get the expected result:
`let e = { x = throw ""; }; in
(builtins.tryEval (builtins.deepSeq e e)).success` will be
(builtins.tryEval (builtins.deepSeq e e)).success` is
`false`.
`tryEval` intentionally does not return the error message, because that risks bringing non-determinism into the evaluation result, and it would become very difficult to improve error reporting without breaking existing expressions.
@ -1108,7 +1109,7 @@ static RegisterPrimOp primop_trace({
If the
[`debugger-on-trace`](@docroot@/command-ref/conf-file.md#conf-debugger-on-trace)
option is set to `true` and the `--debugger` flag is given, the
interactive debugger will be started when `trace` is called (like
interactive debugger is started when `trace` is called (like
[`break`](@docroot@/language/builtins.md#builtins-break)).
)",
.fun = prim_trace,
@ -1157,7 +1158,7 @@ static RegisterPrimOp primop_warn({
If the
[`abort-on-warn`](@docroot@/command-ref/conf-file.md#conf-abort-on-warn)
option is set, the evaluation will be aborted after the warning is printed.
option is set, the evaluation is aborted after the warning is printed.
This is useful to reveal the stack trace of the warning, when the context is non-interactive and a debugger can not be launched.
)",
.fun = prim_warn,
@ -1179,7 +1180,7 @@ static void prim_second(EvalState & state, const PosIdx pos, Value * * args, Val
static void derivationStrictInternal(
EvalState & state,
const std::string & name,
std::string_view name,
const Bindings * attrs,
Value & v);
@ -1199,7 +1200,7 @@ static void prim_derivationStrict(EvalState & state, const PosIdx pos, Value * *
/* Figure out the name first (for stack backtraces). */
auto nameAttr = state.getAttr(state.sName, attrs, "in the attrset passed as argument to builtins.derivationStrict");
std::string drvName;
std::string_view drvName;
try {
drvName = state.forceStringNoCtx(*nameAttr->value, pos, "while evaluating the `name` attribute passed to builtins.derivationStrict");
} catch (Error & e) {
@ -1258,7 +1259,7 @@ static void checkDerivationName(EvalState & state, std::string_view drvName)
static void derivationStrictInternal(
EvalState & state,
const std::string & drvName,
std::string_view drvName,
const Bindings * attrs,
Value & v)
{
@ -1634,7 +1635,7 @@ static RegisterPrimOp primop_placeholder({
.name = "placeholder",
.args = {"output"},
.doc = R"(
Return at
Return an
[output placeholder string](@docroot@/store/derivation/index.md#output-placeholder)
for the specified *output* that will be substituted by the corresponding
[output path](@docroot@/glossary.md#gloss-output-path)
@ -1799,7 +1800,7 @@ static RegisterPrimOp primop_baseNameOf({
After this, the *base name* is returned as previously described, assuming `/` as the directory separator. (Note that evaluation must be platform independent.)
This is somewhat similar to the [GNU `basename`](https://www.gnu.org/software/coreutils/manual/html_node/basename-invocation.html) command, but GNU `basename` will strip any number of trailing slashes.
This is somewhat similar to the [GNU `basename`](https://www.gnu.org/software/coreutils/manual/html_node/basename-invocation.html) command, but GNU `basename` strips any number of trailing slashes.
)",
.fun = prim_baseNameOf,
});
@ -1898,7 +1899,7 @@ static void prim_findFile(EvalState & state, const PosIdx pos, Value * * args, V
try {
auto rewrites = state.realiseContext(context);
path = rewriteStrings(path, rewrites);
path = rewriteStrings(std::move(path), rewrites);
} catch (InvalidPathError & e) {
state.error<EvalError>(
"cannot find '%1%', since path '%2%' is not valid",
@ -1908,8 +1909,8 @@ static void prim_findFile(EvalState & state, const PosIdx pos, Value * * args, V
}
lookupPath.elements.emplace_back(LookupPath::Elem {
.prefix = LookupPath::Prefix { .s = prefix },
.path = LookupPath::Path { .s = path },
.prefix = LookupPath::Prefix { .s = std::move(prefix) },
.path = LookupPath::Path { .s = std::move(path) },
});
}
@ -1998,9 +1999,9 @@ static RegisterPrimOp primop_findFile(PrimOp {
> ]
> ```
>
> and a *lookup-path* value `"nixos-config"` will cause Nix to try `/home/eelco/Dev/nixos-config` and `/etc/nixos` in that order and return the first path that exists.
> and a *lookup-path* value `"nixos-config"` causes Nix to try `/home/eelco/Dev/nixos-config` and `/etc/nixos` in that order and return the first path that exists.
If `path` starts with `http://` or `https://`, it is interpreted as the URL of a tarball that will be downloaded and unpacked to a temporary location.
If `path` starts with `http://` or `https://`, it is interpreted as the URL of a tarball to be downloaded and unpacked to a temporary location.
The tarball must consist of a single top-level directory.
The URLs of the tarballs from the official `nixos.org` channels can be abbreviated as `channel:<channel-name>`.
@ -2147,7 +2148,7 @@ static RegisterPrimOp primop_readDir({
Return the contents of the directory *path* as a set mapping
directory entries to the corresponding file type. For instance, if
directory `A` contains a regular file `B` and another directory
`C`, then `builtins.readDir ./A` will return the set
`C`, then `builtins.readDir ./A` returns the set
```nix
{ B = "regular"; C = "directory"; }
@ -2182,8 +2183,8 @@ static RegisterPrimOp primop_outputOf({
[input placeholder string](@docroot@/store/derivation/index.md#input-placeholder)
if needed.
If the derivation has a statically-known output path (i.e. the derivation output is input-addressed, or fixed content-addressed), the output path will just be returned.
But if the derivation is content-addressed or if the derivation is itself not-statically produced (i.e. is the output of another derivation), an input placeholder will be returned instead.
If the derivation has a statically-known output path (i.e. the derivation output is input-addressed, or fixed content-addressed), the output path is returned.
But if the derivation is content-addressed or if the derivation is itself not-statically produced (i.e. is the output of another derivation), an input placeholder is returned instead.
*`derivation reference`* must be a string that may contain a regular store path to a derivation, or may be an input placeholder reference.
If the derivation is produced by a derivation, you must explicitly select `drv.outPath`.
@ -2196,7 +2197,7 @@ static RegisterPrimOp primop_outputOf({
"out"
```
will return a input placeholder for the output of the output of `myDrv`.
returns an input placeholder for the output of the output of `myDrv`.
This primop corresponds to the `^` sigil for [deriving paths](@docroot@/glossary.md#gloss-deriving-paths), e.g. as part of installable syntax on the command line.
)",
@ -2374,8 +2375,8 @@ static RegisterPrimOp primop_fromJSON({
static void prim_toFile(EvalState & state, const PosIdx pos, Value * * args, Value & v)
{
NixStringContext context;
std::string name(state.forceStringNoCtx(*args[0], pos, "while evaluating the first argument passed to builtins.toFile"));
std::string contents(state.forceString(*args[1], context, pos, "while evaluating the second argument passed to builtins.toFile"));
auto name = state.forceStringNoCtx(*args[0], pos, "while evaluating the first argument passed to builtins.toFile");
auto contents = state.forceString(*args[1], context, pos, "while evaluating the second argument passed to builtins.toFile");
StorePathSet refs;
@ -2582,12 +2583,12 @@ static RegisterPrimOp primop_filterSource({
>
> `filterSource` should not be used to filter store paths. Since
> `filterSource` uses the name of the input directory while naming
> the output directory, doing so will produce a directory name in
> the output directory, doing so produces a directory name in
> the form of `<hash2>-<hash>-<name>`, where `<hash>-<name>` is
> the name of the input directory. Since `<hash>` depends on the
> unfiltered directory, the name of the output directory will
> indirectly depend on files that are filtered out by the
> function. This will trigger a rebuild even when a filtered out
> unfiltered directory, the name of the output directory
> indirectly depends on files that are filtered out by the
> function. This triggers a rebuild even when a filtered out
> file is changed. Use `builtins.path` instead, which allows
> specifying the name of the output directory.
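A rough sketch of the `builtins.path` alternative recommended above (directory name and filter are hypothetical):

```nix
# Naming the result explicitly keeps the store name independent of the input directory name.
builtins.path {
  path = ./source-dir;
  name = "source";
  filter = path: type: baseNameOf path != ".svn";
}
```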
@ -2602,8 +2603,8 @@ static RegisterPrimOp primop_filterSource({
}
```
However, if `source-dir` is a Subversion working copy, then all
those annoying `.svn` subdirectories will also be copied to the
However, if `source-dir` is a Subversion working copy, then all of
those annoying `.svn` subdirectories are also copied to the
store. Worse, the contents of those directories may change a lot,
causing lots of spurious rebuilds. With `filterSource` you can
filter out the `.svn` directories:
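A minimal sketch of such a filter (the exact example in the manual may differ):

```nix
# Keep everything except directories named ".svn".
builtins.filterSource
  (path: type: type != "directory" || baseNameOf path != ".svn")
  ./source-dir
```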
@ -2623,8 +2624,8 @@ static RegisterPrimOp primop_filterSource({
`"regular"`, `"directory"`, `"symlink"` or `"unknown"` (for other
kinds of files such as device nodes or fifos but note that those
cannot be copied to the Nix store, so if the predicate returns
`true` for them, the copy will fail). If you exclude a directory,
the entire corresponding subtree of *e2* will be excluded.
`true` for them, the copy fails). If you exclude a directory,
the entire corresponding subtree of *e2* is excluded.
)",
.fun = prim_filterSource,
});
@ -2632,7 +2633,7 @@ static RegisterPrimOp primop_filterSource({
static void prim_path(EvalState & state, const PosIdx pos, Value * * args, Value & v)
{
std::optional<SourcePath> path;
std::string name;
std::string_view name;
Value * filterFun = nullptr;
auto method = ContentAddressMethod::Raw::NixArchive;
std::optional<Hash> expectedHash;
@ -2698,7 +2699,7 @@ static RegisterPrimOp primop_path({
- sha256\
When provided, this is the expected hash of the file at the
path. Evaluation will fail if the hash is incorrect, and
path. Evaluation fails if the hash is incorrect, and
providing a hash allows `builtins.path` to be used even when the
`pure-eval` nix config option is on.
)",
@ -2720,7 +2721,7 @@ static void prim_attrNames(EvalState & state, const PosIdx pos, Value * * args,
auto list = state.buildList(args[0]->attrs()->size());
for (const auto & [n, i] : enumerate(*args[0]->attrs()))
(list[n] = state.allocValue())->mkString(state.symbols[i.name]);
list[n] = Value::toPtr(state.symbols[i.name]);
std::sort(list.begin(), list.end(),
[](Value * v1, Value * v2) { return strcmp(v1->c_str(), v2->c_str()) < 0; });
@ -3138,12 +3139,12 @@ static void prim_functionArgs(EvalState & state, const PosIdx pos, Value * * arg
if (!args[0]->isLambda())
state.error<TypeError>("'functionArgs' requires a function").atPos(pos).debugThrow();
if (!args[0]->payload.lambda.fun->hasFormals()) {
if (!args[0]->lambda().fun->hasFormals()) {
v.mkAttrs(&state.emptyBindings);
return;
}
const auto &formals = args[0]->payload.lambda.fun->formals->formals;
const auto &formals = args[0]->lambda().fun->formals->formals;
auto attrs = state.buildBindings(formals.size());
for (auto & i : formals)
attrs.insert(i.name, state.getBool(i.def), i.pos);
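For reference, a quick sketch of the behaviour this primop implements (each formal argument maps to whether it has a default):

```nix
builtins.functionArgs ({ a, b ? 0 }: a)
# => { a = false; b = true; }
```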
@ -3180,9 +3181,8 @@ static void prim_mapAttrs(EvalState & state, const PosIdx pos, Value * * args, V
auto attrs = state.buildBindings(args[1]->attrs()->size());
for (auto & i : *args[1]->attrs()) {
Value * vName = state.allocValue();
Value * vName = Value::toPtr(state.symbols[i.name]);
Value * vFun2 = state.allocValue();
vName->mkString(state.symbols[i.name]);
vFun2->mkApp(args[0], vName);
attrs.alloc(i.name).mkApp(vFun2, i.value);
}
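The primop being refactored here applies a function to every attribute; a minimal sketch of its behaviour:

```nix
# The function receives each attribute's name and value.
builtins.mapAttrs (name: value: value * 10) { a = 1; b = 2; }
# => { a = 10; b = 20; }
```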
@ -3246,8 +3246,7 @@ static void prim_zipAttrsWith(EvalState & state, const PosIdx pos, Value * * arg
auto attrs = state.buildBindings(attrsSeen.size());
for (auto & [sym, elem] : attrsSeen) {
auto name = state.allocValue();
name->mkString(state.symbols[sym]);
auto name = Value::toPtr(state.symbols[sym]);
auto call1 = state.allocValue();
call1->mkApp(args[0], name);
auto call2 = state.allocValue();
@ -3697,10 +3696,14 @@ static void prim_sort(EvalState & state, const PosIdx pos, Value * * args, Value
return state.forceBool(vBool, pos, "while evaluating the return value of the sorting function passed to builtins.sort");
};
/* FIXME: std::sort can segfault if the comparator is not a strict
weak ordering. What to do? std::stable_sort() seems more
resilient, but no guarantees... */
std::stable_sort(list.begin(), list.end(), comparator);
/* NOTE: Using custom implementation because std::sort and std::stable_sort
are not resilient to comparators that violate strict weak ordering. Diagnosing
incorrect implementations is an O(n^3) problem, so doing the checks is much more
expensive than doing the sorting. For this reason we choose to use sorting algorithms
that can't be broken by invalid comparators. peeksort (mergesort)
doesn't misbehave when any of the strict weak order properties is
violated - output is always a reordering of the input. */
peeksort(list.begin(), list.end(), comparator);
v.mkList(list);
}
@ -3722,6 +3725,32 @@ static RegisterPrimOp primop_sort({
This is a stable sort: it preserves the relative order of elements
deemed equal by the comparator.
*comparator* must impose a strict weak ordering on the set of values
in the *list*. This means that for any elements *a*, *b* and *c* from the
*list*, *comparator* must satisfy the following relations:
1. Transitivity
```nix
comparator a b && comparator b c -> comparator a c
```
1. Irreflexivity
```nix
comparator a a == false
```
1. Transitivity of equivalence
```nix
let equiv = a: b: (!comparator a b && !comparator b a); in
equiv a b && equiv b c -> equiv a c
```
If the *comparator* violates any of these properties, then `builtins.sort`
reorders elements in an unspecified manner.
)",
.fun = prim_sort,
});
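As a quick illustration of a comparator that does satisfy these properties:

```nix
builtins.sort builtins.lessThan [ 483 249 526 147 42 77 ]
# => [ 42 77 147 249 483 526 ]
```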
@ -4564,12 +4593,12 @@ static void prim_replaceStrings(EvalState & state, const PosIdx pos, Value * * a
"'from' and 'to' arguments passed to builtins.replaceStrings have different lengths"
).atPos(pos).debugThrow();
std::vector<std::string> from;
std::vector<std::string_view> from;
from.reserve(args[0]->listSize());
for (auto elem : args[0]->listItems())
from.emplace_back(state.forceString(*elem, pos, "while evaluating one of the strings to replace passed to builtins.replaceStrings"));
std::unordered_map<size_t, std::string> cache;
std::unordered_map<size_t, std::string_view> cache;
auto to = args[1]->listItems();
NixStringContext context;
@ -4808,7 +4837,7 @@ void EvalState::createBaseEnv(const EvalSettings & evalSettings)
.type = nInt,
.doc = R"(
Return the [Unix time](https://en.wikipedia.org/wiki/Unix_time) at first evaluation.
Repeated references to that name will re-use the initially obtained value.
Repeated references to that name re-use the initially obtained value.
Example:
@ -4823,7 +4852,7 @@ void EvalState::createBaseEnv(const EvalSettings & evalSettings)
1683705525
```
The [store path](@docroot@/store/store-path.md) of a derivation depending on `currentTime` will differ for each evaluation, unless both evaluate `builtins.currentTime` in the same second.
The [store path](@docroot@/store/store-path.md) of a derivation depending on `currentTime` differs for each evaluation, unless both evaluate `builtins.currentTime` in the same second.
)",
.impureOnly = true,
});

View file

@ -240,7 +240,7 @@ static RegisterPrimOp primop_getContext({
The string context tracks references to derivations within a string.
It is represented as an attribute set of [store derivation](@docroot@/glossary.md#gloss-store-derivation) paths mapping to output names.
Using [string interpolation](@docroot@/language/string-interpolation.md) on a derivation will add that derivation to the string context.
Using [string interpolation](@docroot@/language/string-interpolation.md) on a derivation adds that derivation to the string context.
For example,
```nix

View file

@ -214,7 +214,7 @@ static RegisterPrimOp primop_fetchClosure({
.doc = R"(
Fetch a store path [closure](@docroot@/glossary.md#gloss-closure) from a binary cache, and return the store path as a string with context.
This function can be invoked in three ways, that we will discuss in order of preference.
This function can be invoked in three ways that we will discuss in order of preference.
**Fetch a content-addressed store path**
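A rough sketch of this first form (the store URL and path below are placeholders, not real values):

```nix
builtins.fetchClosure {
  fromStore = "https://cache.nixos.org";
  # Placeholder: must be an existing content-addressed store path in fromStore.
  fromPath = /nix/store/00000000000000000000000000000000-example;
}
```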

View file

@ -191,7 +191,7 @@ static void fetchTree(
input.to_string());
else
state.error<EvalError>(
"in pure evaluation mode, '%s' will not fetch unlocked input '%s'",
"in pure evaluation mode, '%s' doesn't fetch unlocked input '%s'",
fetcher, input.to_string()).atPos(pos).debugThrow();
}
@ -243,7 +243,7 @@ static RegisterPrimOp primop_fetchTree({
That is, `fetchTree` is idempotent.
Downloads are cached in `$XDG_CACHE_HOME/nix`.
The remote source will be fetched from the network if both are true:
The remote source is fetched from the network if both are true:
- A NAR hash is supplied and the corresponding store path is not [valid](@docroot@/glossary.md#gloss-validity), that is, not available in the store
> **Note**
@ -338,7 +338,7 @@ static RegisterPrimOp primop_fetchTree({
> **Note**
>
> If the URL points to a local directory, and no `ref` or `rev` is given, Nix will only consider files added to the Git index, as listed by `git ls-files` but use the *current file contents* of the Git working directory.
> If the URL points to a local directory, and no `ref` or `rev` is given, Nix only considers files added to the Git index, as listed by `git ls-files`, but uses the *current file contents* of the Git working directory.
- `ref` (String, optional)
@ -681,7 +681,7 @@ static RegisterPrimOp primop_fetchGit({
This option has no effect once `shallow` cloning is enabled.
By default, the `ref` value is prefixed with `refs/heads/`.
As of 2.3.0, Nix will not prefix `refs/heads/` if `ref` starts with `refs/`.
As of 2.3.0, Nix doesn't prefix `refs/heads/` if `ref` starts with `refs/`.
- `submodules` (default: `false`)
@ -840,7 +840,7 @@ static RegisterPrimOp primop_fetchGit({
}
```
Nix will refetch the branch according to the [`tarball-ttl`](@docroot@/command-ref/conf-file.md#conf-tarball-ttl) setting.
Nix refetches the branch according to the [`tarball-ttl`](@docroot@/command-ref/conf-file.md#conf-tarball-ttl) setting.
This behavior is disabled in [pure evaluation mode](@docroot@/command-ref/conf-file.md#conf-pure-eval).
@ -851,9 +851,9 @@ static RegisterPrimOp primop_fetchGit({
```
If the URL points to a local directory, and no `ref` or `rev` is
given, `fetchGit` will use the current content of the checked-out
files, even if they are not committed or added to Git's index. It will
only consider files added to the Git repository, as listed by `git ls-files`.
given, `fetchGit` uses the current content of the checked-out
files, even if they are not committed or added to Git's index. It
only considers files added to the Git repository, as listed by `git ls-files`.
)",
.fun = prim_fetchGit,
});

View file

@ -1,6 +1,6 @@
generated_headers += gen_header.process(
'derivation.nix',
preserve_path_from: meson.project_source_root(),
preserve_path_from : meson.project_source_root(),
)
sources += files(

View file

@ -453,13 +453,13 @@ private:
if (v.isLambda()) {
output << "lambda";
if (v.payload.lambda.fun) {
if (v.payload.lambda.fun->name) {
output << " " << state.symbols[v.payload.lambda.fun->name];
if (v.lambda().fun) {
if (v.lambda().fun->name) {
output << " " << state.symbols[v.lambda().fun->name];
}
std::ostringstream s;
s << state.positions[v.payload.lambda.fun->pos];
s << state.positions[v.lambda().fun->pos];
output << " @ " << filterANSIEscapes(toView(s));
}
} else if (v.isPrimOp()) {

View file

@ -126,18 +126,18 @@ static void printValueAsXML(EvalState & state, bool strict, bool location,
break;
}
XMLAttrs xmlAttrs;
if (location) posToXML(state, xmlAttrs, state.positions[v.payload.lambda.fun->pos]);
if (location) posToXML(state, xmlAttrs, state.positions[v.lambda().fun->pos]);
XMLOpenElement _(doc, "function", xmlAttrs);
if (v.payload.lambda.fun->hasFormals()) {
if (v.lambda().fun->hasFormals()) {
XMLAttrs attrs;
if (v.payload.lambda.fun->arg) attrs["name"] = state.symbols[v.payload.lambda.fun->arg];
if (v.payload.lambda.fun->formals->ellipsis) attrs["ellipsis"] = "1";
if (v.lambda().fun->arg) attrs["name"] = state.symbols[v.lambda().fun->arg];
if (v.lambda().fun->formals->ellipsis) attrs["ellipsis"] = "1";
XMLOpenElement _(doc, "attrspat", attrs);
for (auto & i : v.payload.lambda.fun->formals->lexicographicOrder(state.symbols))
for (auto & i : v.lambda().fun->formals->lexicographicOrder(state.symbols))
doc.writeEmptyElement("attr", singletonAttrs("name", state.symbols[i.name]));
} else
doc.writeEmptyElement("varpat", singletonAttrs("name", state.symbols[v.payload.lambda.fun->arg]));
doc.writeEmptyElement("varpat", singletonAttrs("name", state.symbols[v.lambda().fun->arg]));
break;
}

View file

@ -28,7 +28,7 @@ struct Settings : public Config
space-separated `host=token` values. The specific token
used is selected by matching the `host` portion against the
"host" specification of the input. The `host` portion may
contain a path element which will match against the prefix
contain a path element which matches against the prefix
URL for the input. (eg: `github.com/org=token`). The actual use
of the `token` value is determined by the type of resource
being accessed:
@ -95,11 +95,11 @@ struct Settings : public Config
Setting<bool> trustTarballsFromGitForges{
this, true, "trust-tarballs-from-git-forges",
R"(
If enabled (the default), Nix will consider tarballs from
If enabled (the default), Nix considers tarballs from
GitHub and similar Git forges to be locked if a Git revision
is specified,
e.g. `github:NixOS/patchelf/7c2f768bf9601268a4e71c2ebe91e2011918a70f`.
This requires Nix to trust that the provider will return the
This requires Nix to trust that the provider returns the
correct contents for the specified Git revision.
If disabled, such tarballs are only considered locked if a

View file

@ -252,8 +252,8 @@ static Flake readFlake(
if (auto outputs = vInfo.attrs()->get(sOutputs)) {
expectType(state, nFunction, *outputs->value, outputs->pos);
if (outputs->value->isLambda() && outputs->value->payload.lambda.fun->hasFormals()) {
for (auto & formal : outputs->value->payload.lambda.fun->formals->formals) {
if (outputs->value->isLambda() && outputs->value->lambda().fun->hasFormals()) {
for (auto & formal : outputs->value->lambda().fun->formals->formals) {
if (formal.name != state.sSelf)
flake.inputs.emplace(state.symbols[formal.name], FlakeInput {
.ref = parseFlakeRef(state.fetchSettings, std::string(state.symbols[formal.name]))
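For context, a minimal flake sketch: each formal argument of `outputs` other than `self` is registered as an input (here `nixpkgs`, with no explicit `inputs` section):

```nix
{
  outputs = { self, nixpkgs }: {
    packages.x86_64-linux.default =
      nixpkgs.legacyPackages.x86_64-linux.hello;
  };
}
```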
@ -570,7 +570,7 @@ LockedFlake lockFlake(
/* Get the input flake, resolve 'path:./...'
flakerefs relative to the parent flake. */
auto getInputFlake = [&](const FlakeRef & ref)
auto getInputFlake = [&](const FlakeRef & ref, const fetchers::UseRegistries useRegistries)
{
if (auto resolvedPath = resolveRelativePath()) {
return readFlake(state, ref, ref, ref, *resolvedPath, inputAttrPath);
@ -578,7 +578,7 @@ LockedFlake lockFlake(
return getFlake(
state,
ref,
useRegistriesInputs,
useRegistries,
inputAttrPath);
}
};
@ -660,7 +660,7 @@ LockedFlake lockFlake(
}
if (mustRefetch) {
auto inputFlake = getInputFlake(oldLock->lockedRef);
auto inputFlake = getInputFlake(oldLock->lockedRef, useRegistriesInputs);
nodePaths.emplace(childNode, inputFlake.path.parent());
computeLocks(inputFlake.inputs, childNode, inputAttrPath, oldLock, followsPrefix,
inputFlake.path, false);
@ -685,10 +685,11 @@ LockedFlake lockFlake(
nuked the next time we update the lock
file. That is, overrides are sticky unless you
use --no-write-lock-file. */
auto ref = (input2.ref && explicitCliOverrides.contains(inputAttrPath)) ? *input2.ref : *input.ref;
auto inputIsOverride = explicitCliOverrides.contains(inputAttrPath);
auto ref = (input2.ref && inputIsOverride) ? *input2.ref : *input.ref;
if (input.isFlake) {
auto inputFlake = getInputFlake(*input.ref);
auto inputFlake = getInputFlake(*input.ref, inputIsOverride ? fetchers::UseRegistries::All : useRegistriesInputs);
auto childNode = make_ref<LockedNode>(
inputFlake.lockedRef,
@ -793,10 +794,10 @@ LockedFlake lockFlake(
if (auto unlockedInput = newLockFile.isUnlocked(state.fetchSettings)) {
if (lockFlags.failOnUnlocked)
throw Error(
"Will not write lock file of flake '%s' because it has an unlocked input ('%s'). "
"Not writing lock file of flake '%s' because it has an unlocked input ('%s'). "
"Use '--allow-dirty-locks' to allow this anyway.", topRef, *unlockedInput);
if (state.fetchSettings.warnDirty)
warn("will not write lock file of flake '%s' because it has an unlocked input ('%s')", topRef, *unlockedInput);
warn("not writing lock file of flake '%s' because it has an unlocked input ('%s')", topRef, *unlockedInput);
} else {
if (!lockFlags.updateLockFile)
throw Error("flake '%s' requires lock file changes but they're not allowed due to '--no-update-lock-file'", topRef);

View file

@ -43,9 +43,9 @@ struct PluginSettings : Config
{},
"plugin-files",
R"(
A list of plugin files to be loaded by Nix. Each of these files will
be dlopened by Nix. If they contain the symbol `nix_plugin_entry()`,
this symbol will be called. Alternatively, they can affect execution
A list of plugin files to be loaded by Nix. Each of these files is
dlopened by Nix. If they contain the symbol `nix_plugin_entry()`,
this symbol is called. Alternatively, they can affect execution
through static initialization. In particular, these plugins may construct
static instances of RegisterPrimOp to add new primops or constants to the
expression language, RegisterStoreImplementation to add new store
@ -60,7 +60,7 @@ struct PluginSettings : Config
itself, they must be DSOs compatible with the instance of Nix
running at the time (i.e. compiled against the same headers, not
linked to any incompatible libraries). They should not be linked to
any Nix libs directly, as those will be available already at load
any Nix libraries directly, as those are already available at load
time.
If an entry in the list is a directory, all files in the directory

View file

@ -34,6 +34,8 @@ public:
Store * store;
std::string nixDir;
std::string nixStoreDir;
std::string nixStateDir;
std::string nixLogDir;
protected:
void init_local_store()
@ -53,11 +55,13 @@ protected:
#endif
nixStoreDir = nixDir + "/my_nix_store";
nixStateDir = nixDir + "/my_state";
nixLogDir = nixDir + "/my_log";
// Options documented in `nix help-stores`
const char * p1[] = {"store", nixStoreDir.c_str()};
const char * p2[] = {"state", (new std::string(nixDir + "/my_state"))->c_str()};
const char * p3[] = {"log", (new std::string(nixDir + "/my_log"))->c_str()};
const char * p2[] = {"state", nixStateDir.c_str()};
const char * p3[] = {"log", nixLogDir.c_str()};
const char ** params[] = {p1, p2, p3, nullptr};

View file

@ -5,7 +5,6 @@
],
"builder": "/bin/bash",
"env": {
"__json": "{\"builder\":\"/bin/bash\",\"name\":\"advanced-attributes-structured-attrs-defaults\",\"outputHashAlgo\":\"sha256\",\"outputHashMode\":\"recursive\",\"outputs\":[\"out\",\"dev\"],\"system\":\"my-system\"}",
"dev": "/02qcpld1y6xhs5gz9bchpxaw0xdhmsp5dv88lh25r2ss44kh8dxz",
"out": "/1rz4g4znpzjwh1xymhjpm42vipw92pr73vdgl6xs1hycac8kf2n9"
},
@ -22,5 +21,16 @@
"method": "nar"
}
},
"structuredAttrs": {
"builder": "/bin/bash",
"name": "advanced-attributes-structured-attrs-defaults",
"outputHashAlgo": "sha256",
"outputHashMode": "recursive",
"outputs": [
"out",
"dev"
],
"system": "my-system"
},
"system": "my-system"
}
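For orientation, a rough, simplified sketch (hypothetical, not the actual test input) of a derivation that yields a `structuredAttrs` field like the one above:

```nix
# With __structuredAttrs = true, these attributes are serialised to .attrs.json
# (exposed to the builder via NIX_ATTRS_JSON_FILE) instead of plain environment variables.
derivation {
  name = "advanced-attributes-structured-attrs-defaults";
  system = "my-system";
  builder = "/bin/bash";
  outputs = [ "out" "dev" ];
  __structuredAttrs = true;
}
```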

View file

@ -5,7 +5,6 @@
],
"builder": "/bin/bash",
"env": {
"__json": "{\"__darwinAllowLocalNetworking\":true,\"__impureHostDeps\":[\"/usr/bin/ditto\"],\"__noChroot\":true,\"__sandboxProfile\":\"sandcastle\",\"allowSubstitutes\":false,\"builder\":\"/bin/bash\",\"exportReferencesGraph\":{\"refs1\":[\"/164j69y6zir9z0339n8pjigg3rckinlr77bxsavzizdaaljb7nh9\"],\"refs2\":[\"/nix/store/qnml92yh97a6fbrs2m5qg5cqlc8vni58-bar.drv\"]},\"impureEnvVars\":[\"UNICORN\"],\"name\":\"advanced-attributes-structured-attrs\",\"outputChecks\":{\"bin\":{\"disallowedReferences\":[\"/0nyw57wm2iicnm9rglvjmbci3ikmcp823czdqdzdcgsnnwqps71g\"],\"disallowedRequisites\":[\"/07f301yqyz8c6wf6bbbavb2q39j4n8kmcly1s09xadyhgy6x2wr8\"]},\"dev\":{\"maxClosureSize\":5909,\"maxSize\":789},\"out\":{\"allowedReferences\":[\"/164j69y6zir9z0339n8pjigg3rckinlr77bxsavzizdaaljb7nh9\"],\"allowedRequisites\":[\"/0nr45p69vn6izw9446wsh9bng9nndhvn19kpsm4n96a5mycw0s4z\"]}},\"outputHashAlgo\":\"sha256\",\"outputHashMode\":\"recursive\",\"outputs\":[\"out\",\"bin\",\"dev\"],\"preferLocalBuild\":true,\"requiredSystemFeatures\":[\"rainbow\",\"uid-range\"],\"system\":\"my-system\"}",
"bin": "/04f3da1kmbr67m3gzxikmsl4vjz5zf777sv6m14ahv22r65aac9m",
"dev": "/02qcpld1y6xhs5gz9bchpxaw0xdhmsp5dv88lh25r2ss44kh8dxz",
"out": "/1rz4g4znpzjwh1xymhjpm42vipw92pr73vdgl6xs1hycac8kf2n9"
@ -44,5 +43,62 @@
"method": "nar"
}
},
"structuredAttrs": {
"__darwinAllowLocalNetworking": true,
"__impureHostDeps": [
"/usr/bin/ditto"
],
"__noChroot": true,
"__sandboxProfile": "sandcastle",
"allowSubstitutes": false,
"builder": "/bin/bash",
"exportReferencesGraph": {
"refs1": [
"/164j69y6zir9z0339n8pjigg3rckinlr77bxsavzizdaaljb7nh9"
],
"refs2": [
"/nix/store/qnml92yh97a6fbrs2m5qg5cqlc8vni58-bar.drv"
]
},
"impureEnvVars": [
"UNICORN"
],
"name": "advanced-attributes-structured-attrs",
"outputChecks": {
"bin": {
"disallowedReferences": [
"/0nyw57wm2iicnm9rglvjmbci3ikmcp823czdqdzdcgsnnwqps71g"
],
"disallowedRequisites": [
"/07f301yqyz8c6wf6bbbavb2q39j4n8kmcly1s09xadyhgy6x2wr8"
]
},
"dev": {
"maxClosureSize": 5909,
"maxSize": 789
},
"out": {
"allowedReferences": [
"/164j69y6zir9z0339n8pjigg3rckinlr77bxsavzizdaaljb7nh9"
],
"allowedRequisites": [
"/0nr45p69vn6izw9446wsh9bng9nndhvn19kpsm4n96a5mycw0s4z"
]
}
},
"outputHashAlgo": "sha256",
"outputHashMode": "recursive",
"outputs": [
"out",
"bin",
"dev"
],
"preferLocalBuild": true,
"requiredSystemFeatures": [
"rainbow",
"uid-range"
],
"system": "my-system"
},
"system": "my-system"
}

View file

@ -5,7 +5,6 @@
],
"builder": "/bin/bash",
"env": {
"__json": "{\"builder\":\"/bin/bash\",\"name\":\"advanced-attributes-structured-attrs-defaults\",\"outputs\":[\"out\",\"dev\"],\"system\":\"my-system\"}",
"dev": "/nix/store/8bazivnbipbyi569623skw5zm91z6kc2-advanced-attributes-structured-attrs-defaults-dev",
"out": "/nix/store/f8f8nvnx32bxvyxyx2ff7akbvwhwd9dw-advanced-attributes-structured-attrs-defaults"
},
@ -20,5 +19,14 @@
"path": "/nix/store/f8f8nvnx32bxvyxyx2ff7akbvwhwd9dw-advanced-attributes-structured-attrs-defaults"
}
},
"structuredAttrs": {
"builder": "/bin/bash",
"name": "advanced-attributes-structured-attrs-defaults",
"outputs": [
"out",
"dev"
],
"system": "my-system"
},
"system": "my-system"
}

View file

@ -5,7 +5,6 @@
],
"builder": "/bin/bash",
"env": {
"__json": "{\"__darwinAllowLocalNetworking\":true,\"__impureHostDeps\":[\"/usr/bin/ditto\"],\"__noChroot\":true,\"__sandboxProfile\":\"sandcastle\",\"allowSubstitutes\":false,\"builder\":\"/bin/bash\",\"exportReferencesGraph\":{\"refs1\":[\"/nix/store/p0hax2lzvjpfc2gwkk62xdglz0fcqfzn-foo\"],\"refs2\":[\"/nix/store/vj2i49jm2868j2fmqvxm70vlzmzvgv14-bar.drv\"]},\"impureEnvVars\":[\"UNICORN\"],\"name\":\"advanced-attributes-structured-attrs\",\"outputChecks\":{\"bin\":{\"disallowedReferences\":[\"/nix/store/r5cff30838majxk5mp3ip2diffi8vpaj-bar\"],\"disallowedRequisites\":[\"/nix/store/9b61w26b4avv870dw0ymb6rw4r1hzpws-bar-dev\"]},\"dev\":{\"maxClosureSize\":5909,\"maxSize\":789},\"out\":{\"allowedReferences\":[\"/nix/store/p0hax2lzvjpfc2gwkk62xdglz0fcqfzn-foo\"],\"allowedRequisites\":[\"/nix/store/z0rjzy29v9k5qa4nqpykrbzirj7sd43v-foo-dev\"]}},\"outputs\":[\"out\",\"bin\",\"dev\"],\"preferLocalBuild\":true,\"requiredSystemFeatures\":[\"rainbow\",\"uid-range\"],\"system\":\"my-system\"}",
"bin": "/nix/store/33qms3h55wlaspzba3brlzlrm8m2239g-advanced-attributes-structured-attrs-bin",
"dev": "/nix/store/wyfgwsdi8rs851wmy1xfzdxy7y5vrg5l-advanced-attributes-structured-attrs-dev",
"out": "/nix/store/7cxy4zx1vqc885r4jl2l64pymqbdmhii-advanced-attributes-structured-attrs"
@ -41,5 +40,60 @@
"path": "/nix/store/7cxy4zx1vqc885r4jl2l64pymqbdmhii-advanced-attributes-structured-attrs"
}
},
"structuredAttrs": {
"__darwinAllowLocalNetworking": true,
"__impureHostDeps": [
"/usr/bin/ditto"
],
"__noChroot": true,
"__sandboxProfile": "sandcastle",
"allowSubstitutes": false,
"builder": "/bin/bash",
"exportReferencesGraph": {
"refs1": [
"/nix/store/p0hax2lzvjpfc2gwkk62xdglz0fcqfzn-foo"
],
"refs2": [
"/nix/store/vj2i49jm2868j2fmqvxm70vlzmzvgv14-bar.drv"
]
},
"impureEnvVars": [
"UNICORN"
],
"name": "advanced-attributes-structured-attrs",
"outputChecks": {
"bin": {
"disallowedReferences": [
"/nix/store/r5cff30838majxk5mp3ip2diffi8vpaj-bar"
],
"disallowedRequisites": [
"/nix/store/9b61w26b4avv870dw0ymb6rw4r1hzpws-bar-dev"
]
},
"dev": {
"maxClosureSize": 5909,
"maxSize": 789
},
"out": {
"allowedReferences": [
"/nix/store/p0hax2lzvjpfc2gwkk62xdglz0fcqfzn-foo"
],
"allowedRequisites": [
"/nix/store/z0rjzy29v9k5qa4nqpykrbzirj7sd43v-foo-dev"
]
}
},
"outputs": [
"out",
"bin",
"dev"
],
"preferLocalBuild": true,
"requiredSystemFeatures": [
"rainbow",
"uid-range"
],
"system": "my-system"
},
"system": "my-system"
}

View file

@ -71,17 +71,21 @@ TEST_F(nix_api_store_test, ReturnsValidStorePath)
ASSERT_NE(result, nullptr);
ASSERT_STREQ("name", result->path.name().data());
ASSERT_STREQ(PATH_SUFFIX.substr(1).c_str(), result->path.to_string().data());
nix_store_path_free(result);
}
TEST_F(nix_api_store_test, SetsLastErrCodeToNixOk)
{
nix_store_parse_path(ctx, store, (nixStoreDir + PATH_SUFFIX).c_str());
StorePath * path = nix_store_parse_path(ctx, store, (nixStoreDir + PATH_SUFFIX).c_str());
ASSERT_EQ(ctx->last_err_code, NIX_OK);
nix_store_path_free(path);
}
TEST_F(nix_api_store_test, DoesNotCrashWhenContextIsNull)
{
ASSERT_NO_THROW(nix_store_parse_path(ctx, store, (nixStoreDir + PATH_SUFFIX).c_str()));
StorePath * path = nullptr;
ASSERT_NO_THROW(path = nix_store_parse_path(ctx, store, (nixStoreDir + PATH_SUFFIX).c_str()));
nix_store_path_free(path);
}
TEST_F(nix_api_store_test, get_version)
@ -119,6 +123,7 @@ TEST_F(nix_api_store_test, nix_store_is_valid_path_not_in_store)
{
StorePath * path = nix_store_parse_path(ctx, store, (nixStoreDir + PATH_SUFFIX).c_str());
ASSERT_EQ(false, nix_store_is_valid_path(ctx, store, path));
nix_store_path_free(path);
}
TEST_F(nix_api_store_test, nix_store_real_path)

View file

@ -613,9 +613,6 @@ Goal::Co DerivationBuildingGoal::tryToBuild()
void closeLogFile() override {
goal.closeLogFile();
}
SingleDrvOutputs assertPathValidity() override {
return goal.assertPathValidity();
}
void appendLogTailErrorMsg(std::string & msg) override {
goal.appendLogTailErrorMsg(msg);
}

View file

@ -210,6 +210,13 @@ Goal::Co DerivationGoal::haveDerivation(StorePath drvPath)
.outputs = wantedOutputs,
});
if (buildMode == bmCheck) {
/* In checking mode, the builder will not register any outputs.
So we want to make sure the ones that we wanted to check are
properly there. */
buildResult.builtOutputs = assertPathValidity(drvPath);
}
co_return amDone(g->exitCode, g->ex);
};

View file

@ -1333,6 +1333,11 @@ nlohmann::json Derivation::toJSON(const StoreDirConfig & store) const
res["args"] = args;
res["env"] = env;
if (auto it = env.find("__json"); it != env.end()) {
res["env"].erase("__json");
res["structuredAttrs"] = nlohmann::json::parse(it->second);
}
return res;
}
@ -1396,7 +1401,17 @@ Derivation Derivation::fromJSON(
res.platform = getString(valueAt(json, "system"));
res.builder = getString(valueAt(json, "builder"));
res.args = getStringList(valueAt(json, "args"));
res.env = getStringMap(valueAt(json, "env"));
auto envJson = valueAt(json, "env");
try {
res.env = getStringMap(envJson);
} catch (Error & e) {
e.addTrace({}, "while reading key 'env'");
throw;
}
if (auto structuredAttrs = get(json, "structuredAttrs"))
res.env.insert_or_assign("__json", structuredAttrs->dump());
return res;
}

View file

@ -46,7 +46,7 @@ struct FileTransferSettings : Config
)"};
Setting<unsigned int> tries{this, 5, "download-attempts",
"How often Nix will attempt to download a file before giving up."};
"The number of times Nix attempts to download a file before giving up."};
Setting<size_t> downloadBufferSize{this, 64 * 1024 * 1024, "download-buffer-size",
R"(

View file

@ -109,7 +109,7 @@ public:
Setting<bool> tryFallback{
this, false, "fallback",
R"(
If set to `true`, Nix will fall back to building from source if a
If set to `true`, Nix falls back to building from source if a
binary substitute fails. This is equivalent to the `--fallback`
flag. The default is `false`.
)",
@ -127,11 +127,11 @@ public:
MaxBuildJobsSetting maxBuildJobs{
this, 1, "max-jobs",
R"(
Maximum number of jobs that Nix will try to build locally in parallel.
Maximum number of jobs that Nix tries to build locally in parallel.
The special value `auto` causes Nix to use the number of CPUs in your system.
Use `0` to disable local builds and directly use the remote machines specified in [`builders`](#conf-builders).
This will not affect derivations that have [`preferLocalBuild = true`](@docroot@/language/advanced-attributes.md#adv-attr-preferLocalBuild), which are always built locally.
This doesn't affect derivations that have [`preferLocalBuild = true`](@docroot@/language/advanced-attributes.md#adv-attr-preferLocalBuild), which are always built locally.
> **Note**
>
@ -146,8 +146,8 @@ public:
this, 16, "max-substitution-jobs",
R"(
This option defines the maximum number of substitution jobs that Nix
will try to run in parallel. The default is `16`. The minimum value
one can choose is `1` and lower values will be interpreted as `1`.
tries to run in parallel. The default is `16`. The minimum value
one can choose is `1` and lower values are interpreted as `1`.
)",
{"substitution-max-jobs"}};
@ -164,7 +164,7 @@ public:
A very generic example using `derivation` and `xargs` may be more appropriate to explain the mechanism.
Using `mkDerivation` as an example requires being aware of that there are multiple independent layers that are completely opaque here.
-->
For instance, in Nixpkgs, if the attribute `enableParallelBuilding` for the `mkDerivation` build helper is set to `true`, it will pass the `-j${NIX_BUILD_CORES}` flag to GNU Make.
For instance, in Nixpkgs, if the attribute `enableParallelBuilding` for the `mkDerivation` build helper is set to `true`, it passes the `-j${NIX_BUILD_CORES}` flag to GNU Make.
The value `0` means that the `builder` should use all available CPU cores in the system.
@ -186,7 +186,7 @@ public:
this, NIX_LOCAL_SYSTEM, "system",
R"(
The system type of the current Nix installation.
Nix will only build a given [store derivation](@docroot@/glossary.md#gloss-store-derivation) locally when its `system` attribute equals any of the values specified here or in [`extra-platforms`](#conf-extra-platforms).
Nix only builds a given [store derivation](@docroot@/glossary.md#gloss-store-derivation) locally when its `system` attribute equals any of the values specified here or in [`extra-platforms`](#conf-extra-platforms).
The default value is set when Nix itself is compiled for the system it will run on.
The following system types are widely used, as Nix is actively supported on these platforms:
@ -292,28 +292,28 @@ public:
> `i686-linux,x86_64-linux`
3. The SSH identity file to be used to log in to the remote machine.
If omitted, SSH will use its regular identities.
If omitted, SSH uses its regular identities.
> **Example**
>
> `/home/user/.ssh/id_mac`
4. The maximum number of builds that Nix will execute in parallel on the machine.
4. The maximum number of builds that Nix executes in parallel on the machine.
Typically this should be equal to the number of CPU cores.
5. The speed factor, indicating the relative speed of the machine as a positive integer.
If there are multiple machines of the right type, Nix will prefer the fastest, taking load into account.
If there are multiple machines of the right type, Nix prefers the fastest, taking load into account.
6. A comma-separated list of supported [system features](#conf-system-features).
A machine will only be used to build a derivation if all the features in the derivation's [`requiredSystemFeatures`](@docroot@/language/advanced-attributes.html#adv-attr-requiredSystemFeatures) attribute are supported by that machine.
A machine is only used to build a derivation if all the features in the derivation's [`requiredSystemFeatures`](@docroot@/language/advanced-attributes.html#adv-attr-requiredSystemFeatures) attribute are supported by that machine.
7. A comma-separated list of required [system features](#conf-system-features).
A machine will only be used to build a derivation if all of the machine's required features appear in the derivation's [`requiredSystemFeatures`](@docroot@/language/advanced-attributes.html#adv-attr-requiredSystemFeatures) attribute.
A machine is only used to build a derivation if all of the machine's required features appear in the derivation's [`requiredSystemFeatures`](@docroot@/language/advanced-attributes.html#adv-attr-requiredSystemFeatures) attribute.
8. The (base64-encoded) public host key of the remote machine.
If omitted, SSH will use its regular `known_hosts` file.
If omitted, SSH uses its regular `known_hosts` file.
The value for this field can be obtained via `base64 -w0`.
@ -335,7 +335,7 @@ public:
> nix@poochie.labs.cs.uu.nl i686-linux /home/nix/.ssh/id_scratchy 1 2 kvm benchmark
> ```
>
> However, `poochie` will only build derivations that have the attribute
> However, `poochie` only builds derivations that have the attribute
>
> ```nix
> requiredSystemFeatures = [ "benchmark" ];
@ -348,7 +348,7 @@ public:
> ```
>
> `itchy` cannot do builds that require `kvm`, but `scratchy` does support such builds.
> For regular builds, `itchy` will be preferred over `scratchy` because it has a higher speed factor.
> For regular builds, `itchy` is preferred over `scratchy` because it has a higher speed factor.
For Nix to use substituters, the calling user must be in the [`trusted-users`](#conf-trusted-users) list.
@ -365,22 +365,22 @@ public:
To build only on remote machines and disable local builds, set [`max-jobs`](#conf-max-jobs) to 0.
If you want the remote machines to use substituters, set [`builders-use-substitutes`](#conf-builders-use-substituters) to `true`.
If you want the remote machines to use substituters, set [`builders-use-substitutes`](#conf-builders-use-substitutes) to `true`.
)",
{}, false};
Setting<bool> alwaysAllowSubstitutes{
this, false, "always-allow-substitutes",
R"(
If set to `true`, Nix will ignore the [`allowSubstitutes`](@docroot@/language/advanced-attributes.md) attribute in derivations and always attempt to use [available substituters](#conf-substituters).
If set to `true`, Nix ignores the [`allowSubstitutes`](@docroot@/language/advanced-attributes.md) attribute in derivations and always attempts to use [available substituters](#conf-substituters).
)"};
Setting<bool> buildersUseSubstitutes{
this, false, "builders-use-substitutes",
R"(
If set to `true`, Nix will instruct [remote build machines](#conf-builders) to use their own [`substituters`](#conf-substituters) if available.
If set to `true`, Nix instructs [remote build machines](#conf-builders) to use their own [`substituters`](#conf-substituters) if available.
It means that remote build hosts will fetch as many dependencies as possible from their own substituters (e.g, from `cache.nixos.org`) instead of waiting for the local machine to upload them all.
It means that remote build hosts fetch as many dependencies as possible from their own substituters (e.g., from `cache.nixos.org`) instead of waiting for the local machine to upload them all.
This can drastically reduce build times if the network connection between the local machine and the remote build host is slow.
)"};
@ -415,7 +415,7 @@ public:
Setting<bool> useSubstitutes{
this, true, "substitute",
R"(
If set to `true` (default), Nix will use binary substitutes if
If set to `true` (default), Nix uses binary substitutes if
available. This option can be disabled to force building from
source.
)",
@ -432,11 +432,11 @@ public:
since that would allow him/her to influence the build result.
Therefore, if this option is non-empty and specifies a valid group,
builds will be performed under the user accounts that are a member
builds are performed under the user accounts that are a member
of the group specified here (as listed in `/etc/group`). Those user
accounts should not be used for any other purpose\!
Nix will never run two builds under the same user account at the
Nix never runs two builds under the same user account at the
same time. This is to prevent an obvious security hole: a malicious
user writing a Nix expression that modifies the build result of a
legitimate Nix expression being built by another user. Therefore it
@ -448,7 +448,7 @@ public:
by the Nix account, its group should be the group specified here,
and its mode should be `1775`.
If the build users group is empty, builds will be performed under
If the build users group is empty, builds are performed under
the uid of the Nix process (that is, the uid of the caller if
`NIX_REMOTE` is empty, the uid under which the Nix daemon runs if
`NIX_REMOTE` is `daemon`). Obviously, this should not be used
@ -503,7 +503,7 @@ public:
Setting<bool> keepLog{
this, true, "keep-build-log",
R"(
If set to `true` (the default), Nix will write the build log of a
If set to `true` (the default), Nix writes the build log of a
derivation (i.e. the standard output and error of its builder) to
the directory `/nix/var/log/nix/drvs`. The build log can be
retrieved using the command `nix-store -l path`.
@ -514,8 +514,8 @@ public:
this, true, "compress-build-log",
R"(
If set to `true` (the default), build logs written to
`/nix/var/log/nix/drvs` will be compressed on the fly using bzip2.
Otherwise, they will not be compressed.
`/nix/var/log/nix/drvs` are compressed on the fly using bzip2.
Otherwise, they are not compressed.
)",
{"build-compress-log"}};
@ -534,14 +534,14 @@ public:
Setting<bool> gcKeepOutputs{
this, false, "keep-outputs",
R"(
If `true`, the garbage collector will keep the outputs of
non-garbage derivations. If `false` (default), outputs will be
If `true`, the garbage collector keeps the outputs of
non-garbage derivations. If `false` (default), outputs are
deleted unless they are GC roots themselves (or reachable from other
roots).
In general, outputs must be registered as roots separately. However,
even if the output of a derivation is registered as a root, the
collector will still delete store paths that are used only at build
collector still deletes store paths that are used only at build
time (e.g., the C compiler, or source tarballs downloaded from the
network). To prevent it from doing so, set this option to `true`.
)",
@ -550,9 +550,9 @@ public:
Setting<bool> gcKeepDerivations{
this, true, "keep-derivations",
R"(
If `true` (default), the garbage collector will keep the derivations
from which non-garbage store paths were built. If `false`, they will
be deleted unless explicitly registered as a root (or reachable from
If `true` (default), the garbage collector keeps the derivations
from which non-garbage store paths were built. If `false`, they are
deleted unless explicitly registered as a root (or reachable from
other roots).
Keeping derivation around is useful for querying and traceability
@ -582,7 +582,7 @@ public:
If `true`, when you add a Nix derivation to a user environment, the
path of the derivation is stored in the user environment. Thus, the
derivation will not be garbage-collected until the user environment
derivation isn't garbage-collected until the user environment
generation is deleted (`nix-env --delete-generations`). To prevent
build-time-only dependencies from being collected, you should also
turn on `keep-outputs`.
@ -603,9 +603,9 @@ public:
#endif
, "sandbox",
R"(
If set to `true`, builds will be performed in a *sandboxed
If set to `true`, builds are performed in a *sandboxed
environment*, i.e., they're isolated from the normal file system
hierarchy and will only see their dependencies in the Nix store,
hierarchy and only see their dependencies in the Nix store,
the temporary build directory, private versions of `/proc`,
`/dev`, `/dev/shm` and `/dev/pts` (on Linux), and the paths
configured with the `sandbox-paths` option. This is useful to
@ -634,13 +634,13 @@ public:
R"(
A list of paths bind-mounted into Nix sandbox environments. You can
use the syntax `target=source` to mount a path in a different
location in the sandbox; for instance, `/bin=/nix-bin` will mount
location in the sandbox; for instance, `/bin=/nix-bin` mounts
the path `/nix-bin` as `/bin` inside the sandbox. If *source* is
followed by `?`, then it is not an error if *source* does not exist;
for example, `/dev/nvidiactl?` specifies that `/dev/nvidiactl` will
for example, `/dev/nvidiactl?` specifies that `/dev/nvidiactl`
only be mounted in the sandbox if it exists in the host filesystem.
If the source is in the Nix store, then its closure will be added to
If the source is in the Nix store, then its closure is added to
the sandbox as well.
Depending on how Nix was built, the default value for this option
@ -655,15 +655,15 @@ public:
Setting<bool> requireDropSupplementaryGroups{this, isRootUser(), "require-drop-supplementary-groups",
R"(
Following the principle of least privilege,
Nix will attempt to drop supplementary groups when building with sandboxing.
Nix attempts to drop supplementary groups when building with sandboxing.
However this can fail under some circumstances.
For example, if the user lacks the `CAP_SETGID` capability.
Search `setgroups(2)` for `EPERM` to find more detailed information on this.
If you encounter such a failure, setting this option to `false` will let you ignore it and continue.
If you encounter such a failure, setting this option to `false` enables you to ignore it and continue.
But before doing so, you should consider the security implications carefully.
Not dropping supplementary groups means the build sandbox will be less restricted than intended.
Not dropping supplementary groups means the build sandbox is less restricted than intended.
This option defaults to `true` when the user is root
(since `root` usually has permissions to call setgroups)
@ -697,14 +697,7 @@ public:
Setting<std::optional<Path>> buildDir{this, std::nullopt, "build-dir",
R"(
The directory on the host, in which derivations' temporary build directories are created.
If not set, Nix will use the system temporary directory indicated by the `TMPDIR` environment variable.
Note that builds are often performed by the Nix daemon, so its `TMPDIR` is used, and not that of the Nix command line interface.
This is also the location where [`--keep-failed`](@docroot@/command-ref/opt-common.md#opt-keep-failed) leaves its files.
If Nix runs without sandbox, or if the platform does not support sandboxing with bind mounts (e.g. macOS), then the [`builder`](@docroot@/language/derivations.md#attr-builder)'s environment will contain this directory, instead of the virtual location [`sandbox-build-dir`](#conf-sandbox-build-dir).
Override the `build-dir` store setting for all stores that have this setting.
)"};
Setting<PathSet> allowedImpureHostPrefixes{this, {}, "allowed-impure-host-deps",
@ -745,12 +738,11 @@ public:
3. The path to the build's derivation
4. The path to the build's scratch directory. This directory will
exist only if the build was run with `--keep-failed`.
4. The path to the build's scratch directory. This directory
exists only if the build was run with `--keep-failed`.
The stderr and stdout output from the diff hook will not be
displayed to the user. Instead, it will print to the nix-daemon's
log.
The stderr and stdout output from the diff hook isn't
displayed to the user. Instead, it prints to the nix-daemon's log.
When using the Nix daemon, `diff-hook` must be set in the `nix.conf`
configuration file, and cannot be passed at the command line.
@ -788,8 +780,8 @@ public:
this, 60 * 60, "tarball-ttl",
R"(
The number of seconds a downloaded tarball is considered fresh. If
the cached tarball is stale, Nix will check whether it is still up
to date using the ETag header. Nix will download a new version if
the cached tarball is stale, Nix checks whether it is still up
to date using the ETag header. Nix downloads a new version if
the ETag header is unsupported, or the cached ETag doesn't match.
Setting the TTL to `0` forces Nix to always check if the tarball is
@ -824,7 +816,7 @@ public:
R"(
System types of executables that can be run on this machine.
Nix will only build a given [store derivation](@docroot@/glossary.md#gloss-store-derivation) locally when its `system` attribute equals any of the values specified here or in the [`system` option](#conf-system).
Nix only builds a given [store derivation](@docroot@/glossary.md#gloss-store-derivation) locally when its `system` attribute equals any of the values specified here or in the [`system` option](#conf-system).
Setting this can be useful to build derivations locally on compatible machines:
- `i686-linux` executables can be run on `x86_64-linux` machines (set by default)
@ -834,7 +826,7 @@ public:
- `qemu-user` may be used to support non-native platforms (though this
may be slow and buggy)
Build systems will usually detect the target platform to be the current physical system and therefore produce machine code incompatible with what may be intended in the derivation.
Build systems usually detect the target platform to be the current physical system and therefore produce machine code incompatible with what may be intended in the derivation.
You should design your derivation's `builder` accordingly and cross-check the results when using this option against natively-built versions of your derivation.
)",
{},
@ -924,7 +916,7 @@ public:
this, 3600, "narinfo-cache-negative-ttl",
R"(
The TTL in seconds for negative lookups.
If a store path is queried from a [substituter](#conf-substituters) but was not found, there will be a negative lookup cached in the local disk cache database for the specified duration.
If a store path is queried from a [substituter](#conf-substituters) but was not found, a negative lookup is cached in the local disk cache database for the specified duration.
Set to `0` to force updating the lookup cache.
@ -940,7 +932,7 @@ public:
this, 30 * 24 * 3600, "narinfo-cache-positive-ttl",
R"(
The TTL in seconds for positive lookups. If a store path is queried
from a substituter, the result of the query will be cached in the
from a substituter, the result of the query is cached in the
local disk cache database including some of the NAR metadata. The
default TTL is a month, setting a shorter TTL for positive lookups
can be useful for binary caches that have frequent garbage
@ -1026,7 +1018,7 @@ public:
Setting<std::string> netrcFile{
this, fmt("%s/%s", nixConfDir, "netrc"), "netrc-file",
R"(
If set to an absolute path to a `netrc` file, Nix will use the HTTP
If set to an absolute path to a `netrc` file, Nix uses the HTTP
authentication credentials in this file when trying to download from
a remote host through HTTP or HTTPS. Defaults to
`$NIX_CONF_DIR/netrc`.
@ -1052,7 +1044,7 @@ public:
this, getDefaultSSLCertFile(), "ssl-cert-file",
R"(
The path of a file containing CA certificates used to
authenticate `https://` downloads. Nix by default will use
authenticate `https://` downloads. Nix by default uses
the first of the following files that exists:
1. `/etc/ssl/certs/ca-certificates.crt`
@ -1084,7 +1076,7 @@ public:
(Linux-specific.) By default, builders on Linux cannot acquire new
privileges by calling setuid/setgid programs or programs that have
file capabilities. For example, programs such as `sudo` or `ping`
will fail. (Note that in sandbox builds, no such programs are
should fail. (Note that in sandbox builds, no such programs are
available unless you bind-mount them into the sandbox via the
`sandbox-paths` option.) You can allow the use of such programs by
enabling this option. This is impure and usually undesirable, but
@ -1108,7 +1100,7 @@ public:
this, {}, "hashed-mirrors",
R"(
A list of web servers used by `builtins.fetchurl` to obtain files by
hash. Given a hash algorithm *ha* and a base-16 hash *h*, Nix will try to
hash. Given a hash algorithm *ha* and a base-16 hash *h*, Nix tries to
download the file from *hashed-mirror*/*ha*/*h*. This allows files to
be downloaded even if they have disappeared from their original URI.
For example, given an example mirror `http://tarballs.nixos.org/`,
@ -1123,7 +1115,7 @@ public:
Nix will attempt to download this file from
`http://tarballs.nixos.org/sha256/2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae`
first. If it is not available there, if will try the original URI.
first. If it is not available there, it tries the original URI.
)"};
Setting<uint64_t> minFree{
@ -1155,8 +1147,8 @@ public:
Setting<bool> allowSymlinkedStore{
this, false, "allow-symlinked-store",
R"(
If set to `true`, Nix will stop complaining if the store directory
(typically /nix/store) contains symlink components.
If set to `true`, Nix stops complaining if the store directory
(typically `/nix/store`) contains symlink components.
This risks making some builds "impure" because builders sometimes
"canonicalise" paths by resolving all symlink components. Problems
@ -1168,7 +1160,7 @@ public:
Setting<bool> useXDGBaseDirectories{
this, false, "use-xdg-base-directories",
R"(
If set to `true`, Nix will conform to the [XDG Base Directory Specification] for files in `$HOME`.
If set to `true`, Nix conforms to the [XDG Base Directory Specification] for files in `$HOME`.
The environment variables used to implement this are documented in the [Environment Variables section](@docroot@/command-ref/env-common.md).
[XDG Base Directory Specification]: https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html
@ -1206,7 +1198,7 @@ public:
If the user is trusted (see `trusted-users` option), when building
a fixed-output derivation, environment variables set in this option
will be passed to the builder if they are listed in [`impureEnvVars`](@docroot@/language/advanced-attributes.md#adv-attr-impureEnvVars).
are passed to the builder if they are listed in [`impureEnvVars`](@docroot@/language/advanced-attributes.md#adv-attr-impureEnvVars).
This option is useful for, e.g., setting `https_proxy` for
fixed-output derivations and in a multi-user Nix installation, or

View file

@ -27,12 +27,12 @@ struct LocalFSStoreConfig : virtual StoreConfig
PathSetting stateDir{this,
rootDir.get() ? *rootDir.get() + "/nix/var/nix" : settings.nixStateDir,
"state",
"Directory where Nix will store state."};
"Directory where Nix stores state."};
PathSetting logDir{this,
rootDir.get() ? *rootDir.get() + "/nix/var/log/nix" : settings.nixLogDir,
"log",
"directory where Nix will store log files."};
"directory where Nix stores log files."};
PathSetting realStoreDir{this,
rootDir.get() ? *rootDir.get() + "/nix/store" : storeDir, "real",

View file

@ -34,7 +34,39 @@ struct OptimiseStats
uint64_t bytesFreed = 0;
};
struct LocalStoreConfig : std::enable_shared_from_this<LocalStoreConfig>, virtual LocalFSStoreConfig
struct LocalBuildStoreConfig : virtual LocalFSStoreConfig
{
private:
/**
Input for computing the build directory. See `getBuildDir()`.
*/
Setting<std::optional<Path>> buildDir{this, std::nullopt, "build-dir",
R"(
The directory on the host, in which derivations' temporary build directories are created.
If not set, Nix will use the `builds` subdirectory of its configured state directory.
Note that builds are often performed by the Nix daemon, so its `build-dir` applies.
Nix will create this directory automatically with suitable permissions if it does not exist.
Otherwise its permissions must allow all users to traverse the directory (i.e. it must have `o+x` set, in unix parlance) for non-sandboxed builds to work correctly.
This is also the location where [`--keep-failed`](@docroot@/command-ref/opt-common.md#opt-keep-failed) leaves its files.
If Nix runs without sandbox, or if the platform does not support sandboxing with bind mounts (e.g. macOS), then the [`builder`](@docroot@/language/derivations.md#attr-builder)'s environment will contain this directory, instead of the virtual location [`sandbox-build-dir`](#conf-sandbox-build-dir).
> **Warning**
>
> `build-dir` must not be set to a world-writable directory.
> Placing temporary build directories in a world-writable place allows other users to access or modify build data that is currently in use.
> This alone is merely an impurity, but combined with another factor this has allowed malicious derivations to escape the build sandbox.
)"};
public:
Path getBuildDir() const;
};
struct LocalStoreConfig : std::enable_shared_from_this<LocalStoreConfig>, virtual LocalFSStoreConfig, virtual LocalBuildStoreConfig
{
using LocalFSStoreConfig::LocalFSStoreConfig;
@ -54,7 +86,7 @@ struct LocalStoreConfig : std::enable_shared_from_this<LocalStoreConfig>, virtua
R"(
Allow this store to be opened when its [database](@docroot@/glossary.md#gloss-nix-database) is on a read-only filesystem.
Normally Nix will attempt to open the store database in read-write mode, even for querying (when write access is not needed), causing it to fail if the database is on a read-only filesystem.
Normally Nix attempts to open the store database in read-write mode, even for querying (when write access is not needed), causing it to fail if the database is on a read-only filesystem.
Enable read-only mode to disable locking and open the SQLite database with the [`immutable` parameter](https://www.sqlite.org/c3ref/open.html) set.

View file

@ -25,7 +25,7 @@ struct S3BinaryCacheStoreConfig : std::enable_shared_from_this<S3BinaryCacheStor
"profile",
R"(
The name of the AWS configuration profile to use. By default
Nix will use the `default` profile.
Nix uses the `default` profile.
)"};
protected:
@ -40,7 +40,7 @@ public:
"region",
R"(
The region of the S3 bucket. If your bucket is not in
`useast-1`, you should always explicitly specify the region
`us-east-1`, you should always explicitly specify the region
parameter.
)"};
@ -69,7 +69,7 @@ public:
> **Note**
>
> This endpoint must support HTTPS and will use path-based
> This endpoint must support HTTPS and uses path-based
> addressing instead of virtual host based addressing.
)"};

View file

@ -2,4 +2,5 @@ include_dirs += include_directories('../..')
headers += files(
'personality.hh',
# hack for trailing newline
)

View file

@ -1,5 +1,6 @@
sources += files(
'personality.cc',
# hack for trailing newline
)
subdir('include/nix/store')

View file

@ -77,6 +77,16 @@ std::string LocalStoreConfig::doc()
;
}
Path LocalBuildStoreConfig::getBuildDir() const
{
return
settings.buildDir.get().has_value()
? *settings.buildDir.get()
: buildDir.get().has_value()
? *buildDir.get()
: stateDir.get() + "/builds";
}
ref<Store> LocalStore::Config::openStore() const
{
return make_ref<LocalStore>(ref{shared_from_this()});
@ -133,7 +143,7 @@ LocalStore::LocalStore(ref<const Config> config)
Path gcRootsDir = config->stateDir + "/gcroots";
if (!pathExists(gcRootsDir)) {
createDirs(gcRootsDir);
createSymlink(profilesDir, gcRootsDir + "/profiles");
replaceSymlink(profilesDir, gcRootsDir + "/profiles");
}
for (auto & perUserDir : {profilesDir + "/per-user", gcRootsDir + "/per-user"}) {
@ -247,7 +257,7 @@ LocalStore::LocalStore(ref<const Config> config)
else if (curSchema == 0) { /* new store */
curSchema = nixSchemaVersion;
openDB(*state, true);
writeFile(schemaPath, fmt("%1%", curSchema), 0666, true);
writeFile(schemaPath, fmt("%1%", curSchema), 0666, FsSync::Yes);
}
else if (curSchema < nixSchemaVersion) {
@ -298,7 +308,7 @@ LocalStore::LocalStore(ref<const Config> config)
txn.commit();
}
writeFile(schemaPath, fmt("%1%", nixSchemaVersion), 0666, true);
writeFile(schemaPath, fmt("%1%", nixSchemaVersion), 0666, FsSync::Yes);
lockFile(globalLock.get(), ltRead, true);
}

View file

@ -95,6 +95,11 @@ protected:
*/
Path topTmpDir;
/**
* The file descriptor of the temporary directory.
*/
AutoCloseFD tmpDirFd;
/**
* The sort of derivation we are building.
*
@ -300,9 +305,24 @@ protected:
/**
* Make a file owned by the builder.
*
* SAFETY: this function is prone to TOCTOU as it receives a path and not a descriptor.
* It's only safe to call in a child of a directory only visible to the owner.
*/
void chownToBuilder(const Path & path);
/**
* Make a file owned by the builder addressed by its file descriptor.
*/
void chownToBuilder(int fd, const Path & path);
/**
* Create a file in `tmpDir` owned by the builder.
*/
void writeBuilderFile(
const std::string & name,
std::string_view contents);
/**
* Run the builder's process.
*/
@ -678,6 +698,18 @@ static void handleChildException(bool sendException)
}
}
static bool checkNotWorldWritable(std::filesystem::path path)
{
while (true) {
auto st = lstat(path);
if (st.st_mode & S_IWOTH)
return false;
if (path == path.parent_path()) break;
path = path.parent_path();
}
return true;
}
void DerivationBuilderImpl::startBuilder()
{
/* Make sure that no other processes are executing under the
@ -700,17 +732,31 @@ void DerivationBuilderImpl::startBuilder()
// since aarch64-darwin has Rosetta 2, this user can actually run x86_64-darwin on their hardware - we should tell them to run the command to install Darwin 2
if (drv.platform == "x86_64-darwin" && settings.thisSystem == "aarch64-darwin")
msg += fmt("\nNote: run `%s` to run programs for x86_64-darwin", Magenta("/usr/sbin/softwareupdate --install-rosetta"));
msg += fmt("\nNote: run `%s` to run programs for x86_64-darwin", Magenta("/usr/sbin/softwareupdate --install-rosetta && launchctl stop org.nixos.nix-daemon"));
throw BuildError(msg);
}
auto buildDir = getLocalStore(store).config->getBuildDir();
createDirs(buildDir);
if (buildUser && !checkNotWorldWritable(buildDir))
throw Error("Path %s or a parent directory is world-writable or a symlink. That's not allowed for security.", buildDir);
/* Create a temporary directory where the build will take
place. */
topTmpDir = createTempDir(settings.buildDir.get().value_or(""), "nix-build-" + std::string(drvPath.name()), 0700);
topTmpDir = createTempDir(buildDir, "nix-build-" + std::string(drvPath.name()), 0700);
setBuildTmpDir();
assert(!tmpDir.empty());
chownToBuilder(tmpDir);
/* The TOCTOU between the previous mkdir call and this open call is unavoidable due to
POSIX semantics.*/
tmpDirFd = AutoCloseFD{open(tmpDir.c_str(), O_RDONLY | O_NOFOLLOW | O_DIRECTORY)};
if (!tmpDirFd)
throw SysError("failed to open the build temporary directory descriptor '%1%'", tmpDir);
chownToBuilder(tmpDirFd.get(), tmpDir);
for (auto & [outputName, status] : initialOutputs) {
/* Set scratch path we'll actually use during the build.
@ -876,7 +922,7 @@ DerivationBuilderImpl::PathsInChroot DerivationBuilderImpl::getPathsInSandbox()
store.computeFSClosure(store.toStorePath(i.second.source).first, closure);
} catch (InvalidPath & e) {
} catch (Error & e) {
e.addTrace({}, "while processing 'sandbox-paths'");
e.addTrace({}, "while processing sandbox path '%s'", i.second.source);
throw;
}
for (auto & i : closure) {
@ -1049,13 +1095,10 @@ void DerivationBuilderImpl::initEnv()
} else {
auto hash = hashString(HashAlgorithm::SHA256, i.first);
std::string fn = ".attr-" + hash.to_string(HashFormat::Nix32, false);
Path p = tmpDir + "/" + fn;
writeFile(p, rewriteStrings(i.second, inputRewrites));
chownToBuilder(p);
writeBuilderFile(fn, rewriteStrings(i.second, inputRewrites));
env[i.first + "Path"] = tmpDirInSandbox() + "/" + fn;
}
}
}
/* For convenience, set an environment pointing to the top build
@ -1130,11 +1173,9 @@ void DerivationBuilderImpl::writeStructuredAttrs()
auto jsonSh = StructuredAttrs::writeShell(json);
writeFile(tmpDir + "/.attrs.sh", rewriteStrings(jsonSh, inputRewrites));
chownToBuilder(tmpDir + "/.attrs.sh");
writeBuilderFile(".attrs.sh", rewriteStrings(jsonSh, inputRewrites));
env["NIX_ATTRS_SH_FILE"] = tmpDirInSandbox() + "/.attrs.sh";
writeFile(tmpDir + "/.attrs.json", rewriteStrings(json.dump(), inputRewrites));
chownToBuilder(tmpDir + "/.attrs.json");
writeBuilderFile(".attrs.json", rewriteStrings(json.dump(), inputRewrites));
env["NIX_ATTRS_JSON_FILE"] = tmpDirInSandbox() + "/.attrs.json";
}
}
@ -1255,6 +1296,25 @@ void DerivationBuilderImpl::chownToBuilder(const Path & path)
throw SysError("cannot change ownership of '%1%'", path);
}
void DerivationBuilderImpl::chownToBuilder(int fd, const Path & path)
{
if (!buildUser) return;
if (fchown(fd, buildUser->getUID(), buildUser->getGID()) == -1)
throw SysError("cannot change ownership of file '%1%'", path);
}
void DerivationBuilderImpl::writeBuilderFile(
const std::string & name,
std::string_view contents)
{
auto path = std::filesystem::path(tmpDir) / name;
AutoCloseFD fd{openat(tmpDirFd.get(), name.c_str(), O_WRONLY | O_TRUNC | O_CREAT | O_CLOEXEC | O_EXCL | O_NOFOLLOW, 0666)};
if (!fd)
throw SysError("creating file %s", path);
writeFile(fd, path, contents);
chownToBuilder(fd.get(), path);
}
void DerivationBuilderImpl::runChild()
{
/* Warning: in the child we should absolutely not make any SQLite
@ -1871,7 +1931,7 @@ SingleDrvOutputs DerivationBuilderImpl::registerOutputs()
also a source for non-determinism. */
if (delayedException)
std::rethrow_exception(delayedException);
return miscMethods->assertPathValidity();
return {};
}
/* Apply output checks. */
@ -2063,6 +2123,15 @@ void DerivationBuilderImpl::checkOutputs(const std::map<std::string, ValidPathIn
void DerivationBuilderImpl::deleteTmpDir(bool force)
{
if (topTmpDir != "") {
/* As an extra precaution, even in the event of `deletePath` failing to
* clean up, the `tmpDir` will be chowned as if we were to move
* it inside the Nix store.
*
* This hardens against an attack which smuggles a file descriptor
* to make use of the temporary directory.
*/
chmod(topTmpDir.c_str(), 0000);
/* Don't keep temporary directories for builtins because they
might have privileged stuff (like a copy of netrc). */
if (settings.keepFailed && !force && !drv.isBuiltin()) {

View file

@ -97,14 +97,6 @@ struct DerivationBuilderCallbacks
*/
virtual void closeLogFile() = 0;
/**
* Aborts if any output is not valid or corrupt, and otherwise
* returns a 'SingleDrvOutputs' structure containing all outputs.
*
* @todo Probably should just be in `DerivationGoal`.
*/
virtual SingleDrvOutputs assertPathValidity() = 0;
virtual void appendLogTailErrorMsg(std::string & msg) = 0;
/**

View file

@ -65,6 +65,7 @@ sources = files(
'position.cc',
'processes.cc',
'references.cc',
'sort.cc',
'spawn.cc',
'strings.cc',
'suggestions.cc',

src/libutil-tests/sort.cc Normal file
View file

@ -0,0 +1,274 @@
#include <gtest/gtest.h>
#include <rapidcheck/gtest.h>
#include "nix/util/sort.hh"
#include <vector>
#include <list>
#include <algorithm>
#include <random>
namespace nix {
struct MonotonicSubranges : public ::testing::Test
{
std::vector<int> empty_;
std::vector<int> basic_ = {1, 0, -1, -100, 10, 10, 20, 40, 5, 5, 20, 10, 10, 1, -5};
};
TEST_F(MonotonicSubranges, empty)
{
ASSERT_EQ(weaklyIncreasingPrefix(empty_.begin(), empty_.end()), empty_.begin());
ASSERT_EQ(weaklyIncreasingSuffix(empty_.begin(), empty_.end()), empty_.begin());
ASSERT_EQ(strictlyDecreasingPrefix(empty_.begin(), empty_.end()), empty_.begin());
ASSERT_EQ(strictlyDecreasingSuffix(empty_.begin(), empty_.end()), empty_.begin());
}
TEST_F(MonotonicSubranges, basic)
{
ASSERT_EQ(strictlyDecreasingPrefix(basic_.begin(), basic_.end()), basic_.begin() + 4);
ASSERT_EQ(strictlyDecreasingSuffix(basic_.begin(), basic_.end()), basic_.begin() + 12);
std::reverse(basic_.begin(), basic_.end());
ASSERT_EQ(weaklyIncreasingPrefix(basic_.begin(), basic_.end()), basic_.begin() + 5);
ASSERT_EQ(weaklyIncreasingSuffix(basic_.begin(), basic_.end()), basic_.begin() + 11);
}
template<typename T>
class SortTestPermutations : public ::testing::Test
{
std::vector<T> initialData = {std::numeric_limits<T>::max(), std::numeric_limits<T>::min(), 0, 0, 42, 126, 36};
std::vector<T> vectorData;
std::list<T> listData;
public:
std::vector<T> scratchVector;
std::list<T> scratchList;
std::vector<T> empty;
void SetUp() override
{
vectorData = initialData;
std::sort(vectorData.begin(), vectorData.end());
listData = std::list(vectorData.begin(), vectorData.end());
}
bool nextPermutation()
{
std::next_permutation(vectorData.begin(), vectorData.end());
std::next_permutation(listData.begin(), listData.end());
scratchList = listData;
scratchVector = vectorData;
return vectorData == initialData;
}
};
using SortPermutationsTypes = ::testing::Types<int, long long, short, unsigned, unsigned long>;
TYPED_TEST_SUITE(SortTestPermutations, SortPermutationsTypes);
TYPED_TEST(SortTestPermutations, insertionsort)
{
while (!this->nextPermutation()) {
auto & list = this->scratchList;
insertionsort(list.begin(), list.end());
ASSERT_TRUE(std::is_sorted(list.begin(), list.end()));
auto & vector = this->scratchVector;
insertionsort(vector.begin(), vector.end());
ASSERT_TRUE(std::is_sorted(vector.begin(), vector.end()));
}
}
TYPED_TEST(SortTestPermutations, peeksort)
{
while (!this->nextPermutation()) {
auto & vector = this->scratchVector;
peeksort(vector.begin(), vector.end());
ASSERT_TRUE(std::is_sorted(vector.begin(), vector.end()));
}
}
TEST(InsertionSort, empty)
{
std::vector<int> empty;
insertionsort(empty.begin(), empty.end());
}
struct RandomPeekSort : public ::testing::TestWithParam<
std::tuple</*maxSize*/ std::size_t, /*min*/ int, /*max*/ int, /*iterations*/ std::size_t>>
{
using ValueType = int;
std::vector<ValueType> data_;
std::mt19937 urng_;
std::uniform_int_distribution<int> distribution_;
void SetUp() override
{
auto [maxSize, min, max, iterations] = GetParam();
urng_ = std::mt19937(GTEST_FLAG_GET(random_seed));
distribution_ = std::uniform_int_distribution<int>(min, max);
}
auto regenerate()
{
auto [maxSize, min, max, iterations] = GetParam();
std::size_t dataSize = std::uniform_int_distribution<std::size_t>(0, maxSize)(urng_);
data_.resize(dataSize);
std::generate(data_.begin(), data_.end(), [&]() { return distribution_(urng_); });
}
};
TEST_P(RandomPeekSort, defaultComparator)
{
auto [maxSize, min, max, iterations] = GetParam();
for (std::size_t i = 0; i < iterations; ++i) {
regenerate();
peeksort(data_.begin(), data_.end());
ASSERT_TRUE(std::is_sorted(data_.begin(), data_.end()));
/* Sorting is idempotent */
peeksort(data_.begin(), data_.end());
ASSERT_TRUE(std::is_sorted(data_.begin(), data_.end()));
}
}
TEST_P(RandomPeekSort, greater)
{
auto [maxSize, min, max, iterations] = GetParam();
for (std::size_t i = 0; i < iterations; ++i) {
regenerate();
peeksort(data_.begin(), data_.end(), std::greater<int>{});
ASSERT_TRUE(std::is_sorted(data_.begin(), data_.end(), std::greater<int>{}));
/* Sorting is idempotent */
peeksort(data_.begin(), data_.end(), std::greater<int>{});
ASSERT_TRUE(std::is_sorted(data_.begin(), data_.end(), std::greater<int>{}));
}
}
TEST_P(RandomPeekSort, brokenComparator)
{
auto [maxSize, min, max, iterations] = GetParam();
/* This is a pretty nice way of modeling a worst-case scenario for a broken comparator.
If the sorting algorithm doesn't break in such a case, then surely all deterministic
predicates won't break it. */
auto comp = [&]([[maybe_unused]] const auto & lhs, [[maybe_unused]] const auto & rhs) -> bool {
return std::uniform_int_distribution<unsigned>(0, 1)(urng_);
};
for (std::size_t i = 0; i < iterations; ++i) {
regenerate();
auto originalData = data_;
peeksort(data_.begin(), data_.end(), comp);
/* Check that the output is just a reordering of the input. This is the
contract of the implementation in regard to comparators that don't
define a strict weak order. */
std::sort(data_.begin(), data_.end());
std::sort(originalData.begin(), originalData.end());
ASSERT_EQ(originalData, data_);
}
}
TEST_P(RandomPeekSort, stability)
{
auto [maxSize, min, max, iterations] = GetParam();
for (std::size_t i = 0; i < iterations; ++i) {
regenerate();
std::vector<std::pair<int, int>> pairs;
/* Assign sequential ids to objects. After the sort, ids for equivalent
elements should be in ascending order. */
std::transform(
data_.begin(), data_.end(), std::back_inserter(pairs), [id = std::size_t{0}](auto && val) mutable {
return std::pair{val, ++id};
});
auto comp = [&]([[maybe_unused]] const auto & lhs, [[maybe_unused]] const auto & rhs) -> bool {
return lhs.first > rhs.first;
};
peeksort(pairs.begin(), pairs.end(), comp);
ASSERT_TRUE(std::is_sorted(pairs.begin(), pairs.end(), comp));
for (auto begin = pairs.begin(), end = pairs.end(); begin < end; ++begin) {
auto key = begin->first;
auto innerEnd = std::find_if_not(begin, end, [key](const auto & lhs) { return lhs.first == key; });
ASSERT_TRUE(std::is_sorted(begin, innerEnd, [](const auto & lhs, const auto & rhs) {
return lhs.second < rhs.second;
}));
begin = innerEnd;
}
}
}
using RandomPeekSortParamType = RandomPeekSort::ParamType;
INSTANTIATE_TEST_SUITE_P(
PeekSort,
RandomPeekSort,
::testing::Values(
RandomPeekSortParamType{128, std::numeric_limits<int>::min(), std::numeric_limits<int>::max(), 1024},
RandomPeekSortParamType{7753, -32, 32, 128},
RandomPeekSortParamType{11719, std::numeric_limits<int>::min(), std::numeric_limits<int>::max(), 64},
RandomPeekSortParamType{4063, 0, 32, 256},
RandomPeekSortParamType{771, -8, 8, 2048},
RandomPeekSortParamType{433, 0, 1, 2048},
RandomPeekSortParamType{0, 0, 0, 1}, /* empty case */
RandomPeekSortParamType{
1, std::numeric_limits<int>::min(), std::numeric_limits<int>::max(), 1}, /* single element */
RandomPeekSortParamType{
2, std::numeric_limits<int>::min(), std::numeric_limits<int>::max(), 2}, /* two elements */
RandomPeekSortParamType{55425, std::numeric_limits<int>::min(), std::numeric_limits<int>::max(), 128}));
template<typename T>
struct SortProperty : public ::testing::Test
{};
using SortPropertyTypes = ::testing::Types<int, unsigned, long long, short, std::string>;
TYPED_TEST_SUITE(SortProperty, SortPropertyTypes);
RC_GTEST_TYPED_FIXTURE_PROP(SortProperty, peeksortSorted, (std::vector<TypeParam> vec))
{
peeksort(vec.begin(), vec.end());
RC_ASSERT(std::is_sorted(vec.begin(), vec.end()));
}
RC_GTEST_TYPED_FIXTURE_PROP(SortProperty, peeksortSortedGreater, (std::vector<TypeParam> vec))
{
auto comp = std::greater<TypeParam>();
peeksort(vec.begin(), vec.end(), comp);
RC_ASSERT(std::is_sorted(vec.begin(), vec.end(), comp));
}
RC_GTEST_TYPED_FIXTURE_PROP(SortProperty, insertionsortSorted, (std::vector<TypeParam> vec))
{
insertionsort(vec.begin(), vec.end());
RC_ASSERT(std::is_sorted(vec.begin(), vec.end()));
}
RC_GTEST_PROP(SortProperty, peeksortStability, (std::vector<std::pair<char, char>> vec))
{
auto comp = [](auto lhs, auto rhs) { return lhs.first < rhs.first; };
auto copy = vec;
std::stable_sort(copy.begin(), copy.end(), comp);
peeksort(vec.begin(), vec.end(), comp);
RC_ASSERT(copy == vec);
}
RC_GTEST_TYPED_FIXTURE_PROP(SortProperty, peeksortSortedLinearComparisonComplexity, (std::vector<TypeParam> vec))
{
peeksort(vec.begin(), vec.end());
RC_ASSERT(std::is_sorted(vec.begin(), vec.end()));
std::size_t comparisonCount = 0;
auto countingComp = [&](auto lhs, auto rhs) {
++comparisonCount;
return lhs < rhs;
};
peeksort(vec.begin(), vec.end(), countingComp);
/* In the sorted case, comparison complexity should be linear. */
RC_ASSERT(comparisonCount <= vec.size());
}
} // namespace nix

View file

@ -107,7 +107,7 @@ constexpr std::array<ExperimentalFeatureDetails, numXpFeatures> xpFeatureDetails
.name = "git-hashing",
.description = R"(
Allow creating (content-addressed) store objects which are hashed via Git's hashing algorithm.
These store objects will not be understandable by older versions of Nix.
These store objects aren't understandable by older versions of Nix.
)",
.trackingUrl = "https://github.com/NixOS/nix/milestone/41",
},

View file

@ -93,7 +93,7 @@ void restorePath(
{
switch (method) {
case FileSerialisationMethod::Flat:
writeFile(path, source, 0666, startFsync);
writeFile(path, source, 0666, startFsync ? FsSync::Yes : FsSync::No);
break;
case FileSerialisationMethod::NixArchive:
restorePath(path, source, startFsync);

View file

@ -304,7 +304,7 @@ void readFile(const Path & path, Sink & sink, bool memory_map)
}
void writeFile(const Path & path, std::string_view s, mode_t mode, bool sync)
void writeFile(const Path & path, std::string_view s, mode_t mode, FsSync sync)
{
AutoCloseFD fd = toDescriptor(open(path.c_str(), O_WRONLY | O_TRUNC | O_CREAT
// TODO
@ -314,22 +314,29 @@ void writeFile(const Path & path, std::string_view s, mode_t mode, bool sync)
, mode));
if (!fd)
throw SysError("opening file '%1%'", path);
try {
writeFull(fd.get(), s);
} catch (Error & e) {
e.addTrace({}, "writing file '%1%'", path);
throw;
}
if (sync)
fd.fsync();
// Explicitly close to make sure exceptions are propagated.
writeFile(fd, path, s, mode, sync);
/* Close explicitly to propagate the exceptions. */
fd.close();
if (sync)
syncParent(path);
}
void writeFile(AutoCloseFD & fd, const Path & origPath, std::string_view s, mode_t mode, FsSync sync)
{
assert(fd);
try {
writeFull(fd.get(), s);
void writeFile(const Path & path, Source & source, mode_t mode, bool sync)
if (sync == FsSync::Yes)
fd.fsync();
} catch (Error & e) {
e.addTrace({}, "writing file '%1%'", origPath);
throw;
}
}
void writeFile(const Path & path, Source & source, mode_t mode, FsSync sync)
{
AutoCloseFD fd = toDescriptor(open(path.c_str(), O_WRONLY | O_TRUNC | O_CREAT
// TODO
@ -353,11 +360,11 @@ void writeFile(const Path & path, Source & source, mode_t mode, bool sync)
e.addTrace({}, "writing file '%1%'", path);
throw;
}
if (sync)
if (sync == FsSync::Yes)
fd.fsync();
// Explicitly close to make sure exceptions are propagated.
fd.close();
if (sync)
if (sync == FsSync::Yes)
syncParent(path);
}
@ -435,7 +442,8 @@ static void _deletePath(Descriptor parentfd, const std::filesystem::path & path,
}
#endif
std::string name(baseNameOf(path.native()));
std::string name(path.filename());
assert(name != "." && name != ".." && !name.empty());
struct stat st;
if (fstatat(parentfd, name.c_str(), &st,
@ -476,7 +484,7 @@ static void _deletePath(Descriptor parentfd, const std::filesystem::path & path,
throw SysError("chmod %1%", path);
}
int fd = openat(parentfd, path.c_str(), O_RDONLY);
int fd = openat(parentfd, name.c_str(), O_RDONLY | O_DIRECTORY | O_NOFOLLOW);
if (fd == -1)
throw SysError("opening directory %1%", path);
AutoCloseDir dir(fdopendir(fd));
@ -488,7 +496,7 @@ static void _deletePath(Descriptor parentfd, const std::filesystem::path & path,
checkInterrupt();
std::string childName = dirent->d_name;
if (childName == "." || childName == "..") continue;
_deletePath(dirfd(dir.get()), path + "/" + childName, bytesFreed, ex MOUNTEDPATHS_ARG);
_deletePath(dirfd(dir.get()), path / childName, bytesFreed, ex MOUNTEDPATHS_ARG);
}
if (errno) throw SysError("reading directory %1%", path);
}
@ -513,14 +521,13 @@ static void _deletePath(Descriptor parentfd, const std::filesystem::path & path,
static void _deletePath(const std::filesystem::path & path, uint64_t & bytesFreed MOUNTEDPATHS_PARAM)
{
Path dir = dirOf(path.string());
if (dir == "")
dir = "/";
assert(path.is_absolute());
assert(path.parent_path() != path);
AutoCloseFD dirfd = toDescriptor(open(dir.c_str(), O_RDONLY));
AutoCloseFD dirfd = toDescriptor(open(path.parent_path().string().c_str(), O_RDONLY));
if (!dirfd) {
if (errno == ENOENT) return;
throw SysError("opening directory '%1%'", path);
throw SysError("opening directory %s", path.parent_path());
}
std::exception_ptr ex;

View file

@ -4,4 +4,5 @@ include_dirs += include_directories('../..')
headers += files(
'freebsd-jail.hh',
# hack for trailing newline
)

View file

@ -1,5 +1,6 @@
sources += files(
'freebsd-jail.cc',
# hack for trailing newline
)
subdir('include/nix/util')

View file

@ -169,21 +169,27 @@ std::string readFile(const Path & path);
std::string readFile(const std::filesystem::path & path);
void readFile(const Path & path, Sink & sink, bool memory_map = true);
enum struct FsSync { Yes, No };
/**
* Write a string to a file.
*/
void writeFile(const Path & path, std::string_view s, mode_t mode = 0666, bool sync = false);
static inline void writeFile(const std::filesystem::path & path, std::string_view s, mode_t mode = 0666, bool sync = false)
void writeFile(const Path & path, std::string_view s, mode_t mode = 0666, FsSync sync = FsSync::No);
static inline void writeFile(const std::filesystem::path & path, std::string_view s, mode_t mode = 0666, FsSync sync = FsSync::No)
{
return writeFile(path.string(), s, mode, sync);
}
void writeFile(const Path & path, Source & source, mode_t mode = 0666, bool sync = false);
static inline void writeFile(const std::filesystem::path & path, Source & source, mode_t mode = 0666, bool sync = false)
void writeFile(const Path & path, Source & source, mode_t mode = 0666, FsSync sync = FsSync::No);
static inline void writeFile(const std::filesystem::path & path, Source & source, mode_t mode = 0666, FsSync sync = FsSync::No)
{
return writeFile(path.string(), source, mode, sync);
}
void writeFile(AutoCloseFD & fd, const Path & origPath, std::string_view s, mode_t mode = 0666, FsSync sync = FsSync::No);
/**
* Flush a path's parent directory to disk.
*/
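For orientation, a minimal caller-side sketch of the `FsSync` overloads above (illustrative only; the include path is assumed from this PR's header layout and is not shown in this hunk):

#include "nix/util/file-system.hh"  // assumed location of writeFile()/FsSync

void saveCounter(const nix::Path & statePath)
{
    // FsSync::Yes fsyncs the file and then its parent directory before returning;
    // FsSync::No (the default) skips both and only closes the descriptor.
    nix::writeFile(statePath, "42", 0666, nix::FsSync::Yes);
}

The enum replaces the old `bool sync` parameter, so call sites read unambiguously at a glance, as in the `local-store.cc` hunk above where the schema file is written with `FsSync::Yes`.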

View file

@ -55,7 +55,7 @@ struct LoggerSettings : Config
Setting<Path> jsonLogPath{
this, "", "json-log-path",
R"(
A file or unix socket to which JSON records of Nix's log output will be
A file or unix socket to which JSON records of Nix's log output are
written, in the same format as `--log-format internal-json`
(without the `@nix ` prefixes on each line).
Concurrent writes to the same file by multiple Nix processes are not supported and

View file

@ -1,6 +1,6 @@
# Public headers directory
include_dirs = [include_directories('../..')]
include_dirs = [ include_directories('../..') ]
headers = files(
'abstract-setting-to-json.hh',
@ -59,12 +59,13 @@ headers = files(
'signals.hh',
'signature/local-keys.hh',
'signature/signer.hh',
'sort.hh',
'source-accessor.hh',
'source-path.hh',
'split.hh',
'std-hash.hh',
'strings.hh',
'strings-inline.hh',
'strings.hh',
'suggestions.hh',
'sync.hh',
'tarfile.hh',

View file

@ -0,0 +1,299 @@
#pragma once
#include <algorithm>
#include <iterator>
#include <concepts>
#include <vector>
#include <type_traits>
#include <functional>
/**
* @file
*
* In-house implementation of sorting algorithms. Used for cases when several properties
* need to be upheld regardless of the stdlib implementation of std::sort or
* std::stable_sort.
*
* PeekSort implementation is adapted from reference implementation
* https://github.com/sebawild/powersort licensed under the MIT License.
*
*/
/* PeekSort attribution:
*
* MIT License
*
* Copyright (c) 2022 Sebastian Wild
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
namespace nix {
/**
* Merge sorted runs [begin, middle) with [middle, end) in-place [begin, end).
* Uses a temporary working buffer by first copying [begin, end) to it.
*
* @param begin Start of the first subrange to be sorted.
* @param middle End of the first sorted subrange and the start of the second.
* @param end End of the second sorted subrange.
* @param workingBegin Start of the working buffer.
* @param comp Comparator implementing an operator()(const ValueType& lhs, const ValueType& rhs).
*
* @pre workingBegin buffer must have at least std::distance(begin, end) elements.
*
* @note We can't use std::inplace_merge or std::merge, because their behavior
* is undefined if the comparator is not strict weak ordering.
*/
template<
std::forward_iterator Iter,
std::random_access_iterator BufIter,
typename Comparator = std::less<std::iter_value_t<Iter>>>
void mergeSortedRunsInPlace(Iter begin, Iter middle, Iter end, BufIter workingBegin, Comparator comp = {})
{
const BufIter workingMiddle = std::move(begin, middle, workingBegin);
const BufIter workingEnd = std::move(middle, end, workingMiddle);
Iter output = begin;
BufIter workingLeft = workingBegin;
BufIter workingRight = workingMiddle;
while (workingLeft != workingMiddle && workingRight != workingEnd) {
/* Note the inversion here: !comp(...., ....). This is required for the merge to be stable.
If a == b, where a is from the left part and b is from the right, then we have to pick
a. */
*output++ = !comp(*workingRight, *workingLeft) ? std::move(*workingLeft++) : std::move(*workingRight++);
}
std::move(workingLeft, workingMiddle, output);
std::move(workingRight, workingEnd, output);
}
/**
* Simple insertion sort.
*
* Does not require that the std::iter_value_t<Iter> is copyable.
*
* @param begin Start of the range to sort.
* @param end End of the range to sort.
* @param comp Comparator that defines the ordering. The order of elements is unspecified
* if comp is not a strict weak ordering.
* @throws Nothing.
*
* Note on exception safety: this function provides weak exception safety
* guarantees. To elaborate: if the comparator throws or move assignment
* throws (value type is not nothrow_move_assignable) then the range is left in
* a consistent, but unspecified state.
*
* @note This can't be implemented in terms of binary search if the strict weak ordering
* needs to be handled in a well-defined but unspecified manner.
*/
template<std::bidirectional_iterator Iter, typename Comparator = std::less<std::iter_value_t<Iter>>>
void insertionsort(Iter begin, Iter end, Comparator comp = {})
{
if (begin == end)
return;
for (Iter current = std::next(begin); current != end; ++current) {
for (Iter insertionPoint = current;
insertionPoint != begin && comp(*insertionPoint, *std::prev(insertionPoint));
--insertionPoint) {
std::swap(*insertionPoint, *std::prev(insertionPoint));
}
}
}
/**
* Find maximal i <= end such that [begin, i) is strictly decreasing according
* to the specified comparator.
*/
template<std::forward_iterator Iter, typename Comparator = std::less<std::iter_value_t<Iter>>>
Iter strictlyDecreasingPrefix(Iter begin, Iter end, Comparator && comp = {})
{
if (begin == end)
return begin;
while (std::next(begin) != end && /* *std::next(begin) < *begin */
comp(*std::next(begin), *begin))
++begin;
return std::next(begin);
}
/**
* Find minimal i >= start such that [i, end) is strictly decreasing according
* to the specified comparator.
*/
template<std::bidirectional_iterator Iter, typename Comparator = std::less<std::iter_value_t<Iter>>>
Iter strictlyDecreasingSuffix(Iter begin, Iter end, Comparator && comp = {})
{
if (begin == end)
return end;
while (std::prev(end) > begin && /* *std::prev(end) < *std::prev(end, 2) */
comp(*std::prev(end), *std::prev(end, 2)))
--end;
return std::prev(end);
}
/**
* Find maximal i <= end such that [begin, i) is weakly increasing according
* to the specified comparator.
*/
template<std::bidirectional_iterator Iter, typename Comparator = std::less<std::iter_value_t<Iter>>>
Iter weaklyIncreasingPrefix(Iter begin, Iter end, Comparator && comp = {})
{
return strictlyDecreasingPrefix(begin, end, std::not_fn(std::forward<Comparator>(comp)));
}
/**
* Find minimal i >= start such that [i, end) is weakly increasing according
* to the specified comparator.
*/
template<std::bidirectional_iterator Iter, typename Comparator = std::less<std::iter_value_t<Iter>>>
Iter weaklyIncreasingSuffix(Iter begin, Iter end, Comparator && comp = {})
{
return strictlyDecreasingSuffix(begin, end, std::not_fn(std::forward<Comparator>(comp)));
}
/**
* Peeksort stable sorting algorithm. Sorts elements in-place.
* Allocates additional memory as needed.
*
* @details
* PeekSort is a stable, near-optimal natural mergesort. Most importantly, like any
* other mergesort it upholds the "Ord safety" property, meaning that even for
* comparator predicates that don't satisfy a strict weak ordering it can't result
* in infinite loops/out of bounds memory accesses or other undefined behavior.
*
* As a quick reminder, strict weak ordering relation operator< must satisfy
* the following properties. Keep in mind that in C++ an equivalence relation
* is specified in terms of operator< like so: a ~ b iff !(a < b) && !(b < a).
*
* 1. a < a === false - relation is irreflexive
* 2. a < b, b < c => a < c - transitivity
* 3. a ~ b, b ~ c => a ~ c - transitivity of equivalence
*
* @see https://www.wild-inter.net/publications/munro-wild-2018
* @see https://github.com/Voultapher/sort-research-rs/blob/main/writeup/sort_safety/text.md#property-analysis
*
* The order of elements when comp is not a strict weak ordering is unspecified, but
* not undefined: the output is always some permutation of the input, regardless
* of the comparator provided.
* Relying on ordering in such cases is erroneous, but this implementation
* will happily accept broken comparators and will not crash.
*
* @param begin Start of the range to be sorted.
* @param end End of the range to be sorted.
* @param comp Comparator implementing an operator()(const ValueType& lhs, const ValueType& rhs).
*
* @throws std::bad_alloc if the temporary buffer can't be allocated.
*
* @return Nothing.
*
* Note on exception safety: this function provides weak exception safety
* guarantees. To elaborate: if the comparator throws or move assignment
* throws (value type is not nothrow_move_assignable) then the range is left in
* a consistent, but unspecified state.
*
*/
template<std::random_access_iterator Iter, typename Comparator = std::less<std::iter_value_t<Iter>>>
/* ValueType must be default constructible to create the temporary buffer */
requires std::is_default_constructible_v<std::iter_value_t<Iter>>
void peeksort(Iter begin, Iter end, Comparator comp = {})
{
auto length = std::distance(begin, end);
/* Special-case very simple inputs. This is identical to how libc++ does it. */
switch (length) {
case 0:
[[fallthrough]];
case 1:
return;
case 2:
if (comp(*--end, *begin)) /* [a, b], b < a */
std::swap(*begin, *end);
return;
}
using ValueType = std::iter_value_t<Iter>;
auto workingBuffer = std::vector<ValueType>(length);
/*
* sorts [begin, end), assuming that [begin, leftRunEnd) and
* [rightRunBegin, end) are sorted.
* Modified implementation from:
* https://github.com/sebawild/powersort/blob/1d078b6be9023e134c4f8f6de88e2406dc681e89/src/sorts/peeksort.h
*/
auto peeksortImpl = [&workingBuffer,
&comp](auto & peeksortImpl, Iter begin, Iter end, Iter leftRunEnd, Iter rightRunBegin) {
if (leftRunEnd == end || rightRunBegin == begin)
return;
/* Dispatch to simpler insertion sort implementation for smaller cases
Cut-off limit is the same as in libstdc++
https://github.com/gcc-mirror/gcc/blob/d9375e490072d1aae73a93949aa158fcd2a27018/libstdc%2B%2B-v3/include/bits/stl_algo.h#L4977
*/
static constexpr std::size_t insertionsortThreshold = 16;
size_t length = std::distance(begin, end);
if (length <= insertionsortThreshold)
return insertionsort(begin, end, comp);
Iter middle = std::next(begin, (length / 2)); /* Middle split between m and m - 1 */
if (middle <= leftRunEnd) {
/* |XXXXXXXX|XX X| */
peeksortImpl(peeksortImpl, leftRunEnd, end, std::next(leftRunEnd), rightRunBegin);
mergeSortedRunsInPlace(begin, leftRunEnd, end, workingBuffer.begin(), comp);
return;
} else if (middle >= rightRunBegin) {
/* |XX X|XXXXXXXX| */
peeksortImpl(peeksortImpl, begin, rightRunBegin, leftRunEnd, std::prev(rightRunBegin));
mergeSortedRunsInPlace(begin, rightRunBegin, end, workingBuffer.begin(), comp);
return;
}
/* Find middle run, i.e., run containing m - 1 */
Iter i, j;
if (!comp(*middle, *std::prev(middle)) /* *std::prev(middle) <= *middle */) {
i = weaklyIncreasingSuffix(leftRunEnd, middle, comp);
j = weaklyIncreasingPrefix(std::prev(middle), rightRunBegin, comp);
} else {
i = strictlyDecreasingSuffix(leftRunEnd, middle, comp);
j = strictlyDecreasingPrefix(std::prev(middle), rightRunBegin, comp);
std::reverse(i, j);
}
if (i == begin && j == end)
return; /* single run */
if (middle - i < j - middle) {
/* |XX x|xxxx X| */
peeksortImpl(peeksortImpl, begin, i, leftRunEnd, std::prev(i));
peeksortImpl(peeksortImpl, i, end, j, rightRunBegin);
mergeSortedRunsInPlace(begin, i, end, workingBuffer.begin(), comp);
} else {
/* |XX xxx|x X| */
peeksortImpl(peeksortImpl, begin, j, leftRunEnd, i);
peeksortImpl(peeksortImpl, j, end, std::next(j), rightRunBegin);
mergeSortedRunsInPlace(begin, j, end, workingBuffer.begin(), comp);
}
};
peeksortImpl(peeksortImpl, begin, end, /*leftRunEnd=*/begin, /*rightRunBegin=*/end);
}
}
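To make the entry points above concrete, here is a small usage sketch (illustrative only, not part of the header):

#include "nix/util/sort.hh"

#include <algorithm>
#include <cassert>
#include <functional>
#include <string>
#include <vector>

void sortExample()
{
    // peeksort: stable, Ord-safe natural mergesort; requires random-access iterators
    // and a default-constructible value type (for the temporary buffer).
    std::vector<std::string> names{"foo", "bar", "baz", "foo"};
    nix::peeksort(names.begin(), names.end());
    assert(std::is_sorted(names.begin(), names.end()));

    // insertionsort: no extra allocation, works on bidirectional iterators,
    // suitable for short ranges or as a fallback.
    std::vector<int> nums{3, 1, 2};
    nix::insertionsort(nums.begin(), nums.end(), std::greater<int>{});
    assert(std::is_sorted(nums.begin(), nums.end(), std::greater<int>{}));
}

As documented above, both calls tolerate comparators that violate strict weak ordering: the result may come out in an unspecified order, but it is always a permutation of the input.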

View file

@ -5,4 +5,5 @@ include_dirs += include_directories('../..')
headers += files(
'cgroup.hh',
'linux-namespaces.hh',
# hack for trailing newline
)

View file

@ -1,6 +1,7 @@
sources += files(
'cgroup.cc',
'linux-namespaces.cc',
# hack for trailing newline
)
subdir('include/nix/util')

View file

@ -6,4 +6,5 @@ headers += files(
'signals-impl.hh',
'windows-async-pipe.hh',
'windows-error.hh',
# hack for trailing newline
)

View file

@ -387,8 +387,8 @@ static void main_nix_build(int argc, char * * argv)
return false;
}
bool add = false;
if (v.type() == nFunction && v.payload.lambda.fun->hasFormals()) {
for (auto & i : v.payload.lambda.fun->formals->formals) {
if (v.type() == nFunction && v.lambda().fun->hasFormals()) {
for (auto & i : v.lambda().fun->formals->formals) {
if (state->symbols[i.name] == "inNixShell") {
add = true;
break;
@ -474,7 +474,7 @@ static void main_nix_build(int argc, char * * argv)
} catch (Error & e) {
logError(e.info());
notice("will use bash from your environment");
notice("uses bash from your environment");
shell = "bash";
}
}

src/nix/flake-command.hh Normal file
View file

@ -0,0 +1,27 @@
#pragma once
#include "nix/cmd/command.hh"
#include "nix/cmd/installable-flake.hh"
#include "nix/flake/flake.hh"
namespace nix {
using namespace nix::flake;
class FlakeCommand : virtual Args, public MixFlakeOptions
{
protected:
std::string flakeUrl = ".";
public:
FlakeCommand();
FlakeRef getFlakeRef();
LockedFlake lockFlake();
std::vector<FlakeRef> getFlakeRefsForCompletion() override;
};
}

View file

@ -1,11 +1,9 @@
#include "nix/cmd/command.hh"
#include "nix/cmd/installable-flake.hh"
#include "flake-command.hh"
#include "nix/main/common-args.hh"
#include "nix/main/shared.hh"
#include "nix/expr/eval.hh"
#include "nix/expr/eval-inline.hh"
#include "nix/expr/eval-settings.hh"
#include "nix/flake/flake.hh"
#include "nix/expr/get-drvs.hh"
#include "nix/util/signals.hh"
#include "nix/store/store-open.hh"
@ -33,43 +31,36 @@ using namespace nix::flake;
using json = nlohmann::json;
struct CmdFlakeUpdate;
class FlakeCommand : virtual Args, public MixFlakeOptions
FlakeCommand::FlakeCommand()
{
protected:
std::string flakeUrl = ".";
expectArgs({
.label = "flake-url",
.optional = true,
.handler = {&flakeUrl},
.completer = {[&](AddCompletions & completions, size_t, std::string_view prefix) {
completeFlakeRef(completions, getStore(), prefix);
}}
});
}
public:
FlakeRef FlakeCommand::getFlakeRef()
{
return parseFlakeRef(fetchSettings, flakeUrl, std::filesystem::current_path().string()); //FIXME
}
FlakeCommand()
{
expectArgs({
.label = "flake-url",
.optional = true,
.handler = {&flakeUrl},
.completer = {[&](AddCompletions & completions, size_t, std::string_view prefix) {
completeFlakeRef(completions, getStore(), prefix);
}}
});
}
LockedFlake FlakeCommand::lockFlake()
{
return flake::lockFlake(flakeSettings, *getEvalState(), getFlakeRef(), lockFlags);
}
FlakeRef getFlakeRef()
{
return parseFlakeRef(fetchSettings, flakeUrl, std::filesystem::current_path().string()); //FIXME
}
LockedFlake lockFlake()
{
return flake::lockFlake(flakeSettings, *getEvalState(), getFlakeRef(), lockFlags);
}
std::vector<FlakeRef> getFlakeRefsForCompletion() override
{
return {
// Like getFlakeRef but with expandTilde called first
parseFlakeRef(fetchSettings, expandTilde(flakeUrl), std::filesystem::current_path().string())
};
}
};
std::vector<FlakeRef> FlakeCommand::getFlakeRefsForCompletion()
{
return {
// Like getFlakeRef but with expandTilde called first
parseFlakeRef(fetchSettings, expandTilde(flakeUrl), std::filesystem::current_path().string())
};
}
struct CmdFlakeUpdate : FlakeCommand
{
@ -492,8 +483,8 @@ struct CmdFlakeCheck : FlakeCommand
if (!v.isLambda()) {
throw Error("overlay is not a function, but %s instead", showType(v));
}
if (v.payload.lambda.fun->hasFormals()
|| !argHasName(v.payload.lambda.fun->arg, "final"))
if (v.lambda().fun->hasFormals()
|| !argHasName(v.lambda().fun->arg, "final"))
throw Error("overlay does not take an argument named 'final'");
// FIXME: if we have a 'nixpkgs' input, use it to
// evaluate the overlay.
@ -1528,21 +1519,7 @@ struct CmdFlakePrefetch : FlakeCommand, MixJSON
struct CmdFlake : NixMultiCommand
{
CmdFlake()
: NixMultiCommand(
"flake",
{
{"update", []() { return make_ref<CmdFlakeUpdate>(); }},
{"lock", []() { return make_ref<CmdFlakeLock>(); }},
{"metadata", []() { return make_ref<CmdFlakeMetadata>(); }},
{"info", []() { return make_ref<CmdFlakeInfo>(); }},
{"check", []() { return make_ref<CmdFlakeCheck>(); }},
{"init", []() { return make_ref<CmdFlakeInit>(); }},
{"new", []() { return make_ref<CmdFlakeNew>(); }},
{"clone", []() { return make_ref<CmdFlakeClone>(); }},
{"archive", []() { return make_ref<CmdFlakeArchive>(); }},
{"show", []() { return make_ref<CmdFlakeShow>(); }},
{"prefetch", []() { return make_ref<CmdFlakePrefetch>(); }},
})
: NixMultiCommand("flake", RegisterCommand::getCommandsFor({"flake"}))
{
}
@ -1566,3 +1543,14 @@ struct CmdFlake : NixMultiCommand
};
static auto rCmdFlake = registerCommand<CmdFlake>("flake");
static auto rCmdFlakeArchive = registerCommand2<CmdFlakeArchive>({"flake", "archive"});
static auto rCmdFlakeCheck = registerCommand2<CmdFlakeCheck>({"flake", "check"});
static auto rCmdFlakeClone = registerCommand2<CmdFlakeClone>({"flake", "clone"});
static auto rCmdFlakeInfo = registerCommand2<CmdFlakeInfo>({"flake", "info"});
static auto rCmdFlakeInit = registerCommand2<CmdFlakeInit>({"flake", "init"});
static auto rCmdFlakeLock = registerCommand2<CmdFlakeLock>({"flake", "lock"});
static auto rCmdFlakeMetadata = registerCommand2<CmdFlakeMetadata>({"flake", "metadata"});
static auto rCmdFlakeNew = registerCommand2<CmdFlakeNew>({"flake", "new"});
static auto rCmdFlakePrefetch = registerCommand2<CmdFlakePrefetch>({"flake", "prefetch"});
static auto rCmdFlakeShow = registerCommand2<CmdFlakeShow>({"flake", "show"});
static auto rCmdFlakeUpdate = registerCommand2<CmdFlakeUpdate>({"flake", "update"});

View file

@ -572,7 +572,7 @@ struct CmdDaemon : Command
addFlag({
.longName = "force-untrusted",
.description = "Force the daemon to not trust connecting clients. The connection will be processed by the receiving daemon before forwarding commands.",
.description = "Force the daemon to not trust connecting clients. The connection is processed by the receiving daemon before forwarding commands.",
.handler = {[&]() {
isTrustedOpt = NotTrusted;
}},

View file

@ -7,6 +7,7 @@
nix_perl_tests = files(
'init.t',
# hack for trailing newline
)

View file

@ -12,7 +12,6 @@ requiresUnprivilegedUserNamespaces
[[ $busybox =~ busybox ]] || skipTest "no busybox"
unset NIX_STORE_DIR
unset NIX_STATE_DIR
# We first build a dependency of the derivation we eventually want to
# build.

View file

@ -9,7 +9,6 @@ requiresUnprivilegedUserNamespaces
[[ "$busybox" =~ busybox ]] || skipTest "no busybox"
unset NIX_STORE_DIR
unset NIX_STATE_DIR
remoteDir=$TEST_ROOT/remote

View file

@ -8,7 +8,6 @@ requiresUnprivilegedUserNamespaces
# Avoid store dir being inside sandbox build-dir
unset NIX_STORE_DIR
unset NIX_STATE_DIR
function join_by { local d=$1; shift; echo -n "$1"; shift; printf "%s" "${@/#/$d}"; }

View file

@ -22,6 +22,11 @@ clearStore
nix-build dependencies.nix --no-out-link
nix-build dependencies.nix --no-out-link --check
# Make sure checking just one output works (#13293)
nix-build multiple-outputs.nix -A a --no-out-link
nix-store --delete "$(nix-build multiple-outputs.nix -A a.second --no-out-link)"
nix-build multiple-outputs.nix -A a.first --no-out-link --check
# Build failure exit codes (100, 104, etc.) are from
# doc/manual/source/command-ref/status-build-failure.md

View file

@ -81,7 +81,7 @@ path2=$(nix eval --raw --expr "(builtins.fetchGit { url = file://$repo; rev = \"
[[ $(nix eval --raw --expr "builtins.readFile (fetchGit { url = file://$repo; rev = \"$rev2\"; } + \"/hello\")") = world ]]
# But without a hash, it fails.
expectStderr 1 nix eval --expr 'builtins.fetchGit "file:///foo"' | grepQuiet "'fetchGit' will not fetch unlocked input"
expectStderr 1 nix eval --expr 'builtins.fetchGit "file:///foo"' | grepQuiet "'fetchGit' doesn't fetch unlocked input"
# Fetch again. This should be cached.
mv $repo ${repo}-tmp

View file

@ -43,7 +43,7 @@ path=$(nix eval --impure --raw --expr "(builtins.fetchTree { type = \"git\"; url
[[ $(nix eval --impure --expr "(builtins.fetchTree { type = \"git\"; url = \"file://$TEST_ROOT/shallow-clone\"; ref = \"dev\"; shallow = true; }).revCount or 123") == 123 ]]
# Test 3: Check unlocked input error message
expectStderr 1 nix eval --expr 'builtins.fetchTree { type = "git"; url = "file:///foo"; }' | grepQuiet "'fetchTree' will not fetch unlocked input"
expectStderr 1 nix eval --expr 'builtins.fetchTree { type = "git"; url = "file:///foo"; }' | grepQuiet "'fetchTree' doesn't fetch unlocked input"
# Test 4: Regression test for revCount in worktrees derived from shallow clones
# Add a worktree to the shallow clone

View file

@ -160,7 +160,7 @@ expect 1 nix build -o "$TEST_ROOT/result" "$flake2Dir#bar" --no-update-lock-file
nix build -o "$TEST_ROOT/result" "$flake2Dir#bar" --commit-lock-file
[[ -e "$flake2Dir/flake.lock" ]]
[[ -z $(git -C "$flake2Dir" diff main || echo failed) ]]
[[ $(jq --indent 0 . < "$flake2Dir/flake.lock") =~ ^'{"nodes":{"flake1":{"locked":{"lastModified":'.*',"narHash":"sha256-'.*'","ref":"refs/heads/master","rev":"'.*'","revCount":2,"type":"git","url":"file:///'.*'"},"original":{"id":"flake1","type":"indirect"}},"root":{"inputs":{"flake1":"flake1"}}},"root":"root","version":7}'$ ]]
[[ $(jq --indent 0 --compact-output . < "$flake2Dir/flake.lock") =~ ^'{"nodes":{"flake1":{"locked":{"lastModified":'.*',"narHash":"sha256-'.*'","ref":"refs/heads/master","rev":"'.*'","revCount":2,"type":"git","url":"file:///'.*'"},"original":{"id":"flake1","type":"indirect"}},"root":{"inputs":{"flake1":"flake1"}}},"root":"root","version":7}'$ ]]
# Rerunning the build should not change the lockfile.
nix build -o "$TEST_ROOT/result" "$flake2Dir#bar"

View file

@ -69,7 +69,7 @@ git -C "$rootFlake" add flake.nix sub2/flake.nix
git -C "$rootFlake" add sub2/flake.lock
[[ $(nix eval "$subflake2#y") = 15 ]]
[[ $(jq --indent 0 . < "$subflake2/flake.lock") =~ ^'{"nodes":{"root":{"inputs":{"root":"root_2","sub1":"sub1"}},"root_2":{"inputs":{"sub0":"sub0"},"locked":{"path":"..","type":"path"},"original":{"path":"..","type":"path"},"parent":[]},"root_3":{"inputs":{"sub0":"sub0_2"},"locked":{"path":"../","type":"path"},"original":{"path":"../","type":"path"},"parent":["sub1"]},"sub0":{"locked":{"path":"sub0","type":"path"},"original":{"path":"sub0","type":"path"},"parent":["root"]},"sub0_2":{"locked":{"path":"sub0","type":"path"},"original":{"path":"sub0","type":"path"},"parent":["sub1","root"]},"sub1":{"inputs":{"root":"root_3"},"locked":{"path":"../sub1","type":"path"},"original":{"path":"../sub1","type":"path"},"parent":[]}},"root":"root","version":7}'$ ]]
[[ $(jq --indent 0 --compact-output . < "$subflake2/flake.lock") =~ ^'{"nodes":{"root":{"inputs":{"root":"root_2","sub1":"sub1"}},"root_2":{"inputs":{"sub0":"sub0"},"locked":{"path":"..","type":"path"},"original":{"path":"..","type":"path"},"parent":[]},"root_3":{"inputs":{"sub0":"sub0_2"},"locked":{"path":"../","type":"path"},"original":{"path":"../","type":"path"},"parent":["sub1"]},"sub0":{"locked":{"path":"sub0","type":"path"},"original":{"path":"sub0","type":"path"},"parent":["root"]},"sub0_2":{"locked":{"path":"sub0","type":"path"},"original":{"path":"sub0","type":"path"},"parent":["sub1","root"]},"sub1":{"inputs":{"root":"root_3"},"locked":{"path":"../sub1","type":"path"},"original":{"path":"../sub1","type":"path"},"parent":[]}},"root":"root","version":7}'$ ]]
# Make sure there are no content locks for relative path flakes.
(! grep "$TEST_ROOT" "$subflake2/flake.lock")

View file

@ -33,7 +33,7 @@ echo 456 > "$flake1Dir"/x.nix
# Dirty overrides require --allow-dirty-locks.
expectStderr 1 nix flake lock "$flake2Dir" --override-input flake1 "$TEST_ROOT/flake1" |
grepQuiet "Will not write lock file.*because it has an unlocked input"
grepQuiet "Not writing lock file.*because it has an unlocked input"
nix flake lock "$flake2Dir" --override-input flake1 "$TEST_ROOT/flake1" --allow-dirty-locks

View file

@ -1 +1 @@
[ [ 42 77 147 249 483 526 ] [ 526 483 249 147 77 42 ] [ "bar" "fnord" "foo" "xyzzy" ] [ { key = 1; value = "foo"; } { key = 1; value = "fnord"; } { key = 2; value = "bar"; } ] [ [ ] [ ] [ 1 ] [ 1 4 ] [ 1 5 ] [ 1 6 ] [ 2 ] [ 2 3 ] [ 3 ] [ 3 ] ] ]
[ [ 42 77 147 249 483 526 ] [ 526 483 249 147 77 42 ] [ "bar" "fnord" "foo" "xyzzy" ] [ { key = 1; value = "foo"; } { key = 1; value = "fnord"; } { key = 2; value = "bar"; } ] [ { key = 1; value = "foo"; } { key = 1; value = "foo2"; } { key = 1; value = "foo3"; } { key = 1; value = "foo4"; } { key = 1; value = "foo5"; } { key = 1; value = "foo6"; } { key = 1; value = "foo7"; } { key = 1; value = "foo8"; } { key = 2; value = "bar"; } { key = 2; value = "bar2"; } { key = 2; value = "bar3"; } { key = 2; value = "bar4"; } { key = 2; value = "bar5"; } { key = 3; value = "baz"; } { key = 3; value = "baz2"; } { key = 3; value = "baz3"; } { key = 3; value = "baz4"; } { key = 4; value = "biz1"; } ] [ [ ] [ ] [ 1 ] [ 1 4 ] [ 1 5 ] [ 1 6 ] [ 2 ] [ 2 3 ] [ 3 ] [ 3 ] ] ]

View file

@ -37,6 +37,80 @@ with builtins;
value = "fnord";
}
])
(sort (x: y: x.key < y.key) [
{
key = 1;
value = "foo";
}
{
key = 2;
value = "bar";
}
{
key = 1;
value = "foo2";
}
{
key = 2;
value = "bar2";
}
{
key = 2;
value = "bar3";
}
{
key = 2;
value = "bar4";
}
{
key = 1;
value = "foo3";
}
{
key = 3;
value = "baz";
}
{
key = 3;
value = "baz2";
}
{
key = 1;
value = "foo4";
}
{
key = 3;
value = "baz3";
}
{
key = 1;
value = "foo5";
}
{
key = 1;
value = "foo6";
}
{
key = 2;
value = "bar5";
}
{
key = 3;
value = "baz4";
}
{
key = 1;
value = "foo7";
}
{
key = 4;
value = "biz1";
}
{
key = 1;
value = "foo8";
}
])
(sort lessThan [
[
1

View file

@ -3,6 +3,7 @@ libplugintest = shared_module(
'plugintest.cc',
dependencies : [
dependency('nix-expr'),
# hack for trailing newline
],
build_by_default : false,
)

View file

@ -67,7 +67,7 @@ testRepl () {
# Simple test, try building a drv
testRepl
# Same thing (kind-of), but with a remote store.
testRepl --store "$TEST_ROOT/store?real=$NIX_STORE_DIR"
testRepl --store "$TEST_ROOT/other-root?real=$NIX_STORE_DIR"
# Remove ANSI escape sequences. They can prevent grep from finding a match.
stripColors () {
@ -157,13 +157,40 @@ foo + baz
' "3" \
./flake ./flake\#bar --experimental-features 'flakes'
testReplResponse $'
:a { a = 1; b = 2; longerName = 3; "with spaces" = 4; }
' 'Added 4 variables.
a, b, longerName, "with spaces"
'
cat <<EOF > attribute-set.nix
{
a = 1;
b = 2;
longerName = 3;
"with spaces" = 4;
}
EOF
testReplResponse '
:l ./attribute-set.nix
' 'Added 4 variables.
a, b, longerName, "with spaces"
'
testReplResponseNoRegex $'
:a builtins.foldl\' (x: y: x // y) {} (map (x: { ${builtins.toString x} = x; }) (builtins.genList (x: x) 23))
' 'Added 23 variables.
"0", "1", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "2", "20", "21", "22", "3", "4", "5", "6"
... and 3 more; view with :ll'
# Test the `:reload` mechanism with flakes:
# - Eval `./flake#changingThing`
# - Modify the flake
# - Re-eval it
# - Check that the result has changed
mkfifo repl_fifo
nix repl ./flake --experimental-features 'flakes' < repl_fifo > repl_output 2>&1 &
touch repl_output
nix repl ./flake --experimental-features 'flakes' < repl_fifo >> repl_output 2>&1 &
repl_pid=$!
exec 3>repl_fifo # Open fifo for writing
echo "changingThing" >&3
@ -277,6 +304,12 @@ testReplResponseNoRegex '
}
'
# Don't prompt for more input when getting unexpected EOF in imported files.
testReplResponse "
import $testDir/lang/parse-fail-eof-pos.nix
" \
'.*error: syntax error, unexpected end of file.*'
# TODO: move init to characterisation/framework.sh
badDiff=0
badExitCode=0
@ -322,7 +355,8 @@ runRepl () {
-e "s@$testDir@/path/to/tests/functional@g" \
-e "s@$testDirNoUnderscores@/path/to/tests/functional@g" \
-e "s@$nixVersion@<nix version>@g" \
-e "s@Added [0-9]* variables@Added <number omitted> variables@g" \
-e "/Added [0-9]* variables/{s@ [0-9]* @ <number omitted> @;n;d}" \
-e '/\.\.\. and [0-9]* more; view with :ll/d' \
| grep -vF $'warning: you don\'t have Internet access; disabling some network-dependent features' \
;
}

View file

@ -50,4 +50,4 @@ expectStderr 0 nix-instantiate --expr "$hackyExpr" --eval --strict | grepQuiet "
# Check it works with the expected structured attrs
hacky=$(nix-instantiate --expr "$hackyExpr")
nix derivation show "$hacky" | jq --exit-status '."'"$hacky"'".env.__json | fromjson | . == {"a": 1}'
nix derivation show "$hacky" | jq --exit-status '."'"$hacky"'".structuredAttrs | . == {"a": 1}'

View file

@ -14,7 +14,6 @@ execUnshare <<EOF
# Avoid store dir being inside sandbox build-dir
unset NIX_STORE_DIR
unset NIX_STATE_DIR
setLocalStore () {
export NIX_REMOTE=\$TEST_ROOT/\$1

View file

@ -3,6 +3,7 @@ libstoreconsumer_tester = executable(
'main.cc',
dependencies : [
dependency('nix-store'),
# hack for trailing newline
],
build_by_default : false,
)

View file

@ -41,5 +41,9 @@ in
# Test that /nix/store is available via an overlayfs mount.
machine.succeed("nix shell --store /tmp/nix ${pkgA} --command cowsay foo >&2")
# Building in /tmp should fail for security reasons.
err = machine.fail("nix build --offline --store /tmp/nix --expr 'builtins.derivation { name = \"foo\"; system = \"x86_64-linux\"; builder = \"/foo\"; }' 2>&1")
assert "is world-writable" in err
'';
}

View file

@ -227,7 +227,7 @@ in
# Fetching without a narHash should succeed if trust-github is set and fail otherwise.
client.succeed(f"nix eval --raw --expr 'builtins.fetchTree github:github:fancy-enterprise/private-flake/{info['revision']}'")
out = client.fail(f"nix eval --no-trust-tarballs-from-git-forges --raw --expr 'builtins.fetchTree github:github:fancy-enterprise/private-flake/{info['revision']}' 2>&1")
assert "will not fetch unlocked input" in out, "--no-trust-tarballs-from-git-forges did not fail with the expected error"
assert "doesn't fetch unlocked input" in out, "--no-trust-tarballs-from-git-forges did not fail with the expected error"
# Shut down the web server. The flake should be cached on the client.
github.succeed("systemctl stop httpd.service")

View file

@ -1,21 +1,15 @@
# Test the container built by ../../docker.nix.
{
lib,
config,
nixpkgs,
hostPkgs,
...
}:
let
pkgs = config.nodes.machine.nixpkgs.pkgs;
nixImage = import ../../docker.nix {
inherit (config.nodes.machine.nixpkgs) pkgs;
};
nixUserImage = import ../../docker.nix {
inherit (config.nodes.machine.nixpkgs) pkgs;
nixImage = pkgs.callPackage ../../docker.nix { };
nixUserImage = pkgs.callPackage ../../docker.nix {
name = "nix-user";
uid = 1000;
gid = 1000;