Mirror of https://github.com/NixOS/nix

Merge remote-tracking branch 'upstream/master' into tomberek.access-token-prefixing

Robert Hensing 2025-02-25 15:22:08 +01:00
commit 1222438ae7
133 changed files with 2861 additions and 1012 deletions

View file

@ -1,5 +1,5 @@
[book]
title = "Nix Reference Manual"
title = "Nix @version@ Reference Manual"
src = "source"
[output.html]

View file

@ -83,6 +83,7 @@ manual = custom_target(
'''
@0@ @INPUT0@ @CURRENT_SOURCE_DIR@ > @DEPFILE@
@0@ @INPUT1@ summary @2@ < @CURRENT_SOURCE_DIR@/source/SUMMARY.md.in > @2@/source/SUMMARY.md
sed -e 's|@version@|@3@|g' < @INPUT2@ > @2@/book.toml
rsync -r --include='*.md' @CURRENT_SOURCE_DIR@/ @2@/
(cd @2@; RUST_LOG=warn @1@ build -d @2@ 3>&2 2>&1 1>&3) | { grep -Fv "because fragment resolution isn't implemented" || :; } 3>&2 2>&1 1>&3
rm -rf @2@/manual
@ -92,12 +93,13 @@ manual = custom_target(
python.full_path(),
mdbook.full_path(),
meson.current_build_dir(),
meson.project_version(),
),
],
input : [
generate_manual_deps,
'substitute.py',
'book.toml',
'book.toml.in',
'anchors.jq',
'custom.css',
nix3_cli_files,

View file

@ -0,0 +1,18 @@
---
synopsis: "Git LFS support"
prs: [10153, 12468]
---
The Git fetcher now supports Large File Storage (LFS). This can be enabled by passing the attribute `lfs = true` to the fetcher, or by appending `?lfs=1` to the fetch URL, e.g.
```console
nix flake prefetch 'git+ssh://git@github.com/Apress/repo-with-large-file-storage.git?lfs=1'
```
A flake can also declare that it requires LFS to be enabled:
```nix
{
inputs.self.lfs = true;
}
```
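The same attribute also works when calling the fetcher directly from a Nix expression; a minimal sketch, reusing the repository from the example above:
```nix
# Attribute form of the same fetch; `lfs` defaults to false when omitted.
builtins.fetchGit {
  url = "ssh://git@github.com/Apress/repo-with-large-file-storage.git";
  lfs = true; # fetch Git LFS objects instead of keeping the pointer files
}
```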
Authors: [**@b-camacho**](https://github.com/b-camacho), [**@kip93**](https://github.com/kip93)

View file

@ -28,7 +28,7 @@ $ nix-shell --attr devShells.x86_64-linux.native-clangStdenvPackages
> **Note**
>
> You can use `native-ccacheStdenvPackages` to drastically improve rebuild time.
> You can use `native-ccacheStdenv` to drastically improve rebuild time.
> By default, [ccache](https://ccache.dev) keeps artifacts in `~/.cache/ccache/`.
To build Nix itself in this shell:

flake.lock generated
View file

@ -36,24 +36,6 @@
"type": "github"
}
},
"flake-utils": {
"inputs": {
"systems": "systems"
},
"locked": {
"lastModified": 1710146030,
"narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"git-hooks-nix": {
"inputs": {
"flake-compat": [],
@ -79,24 +61,6 @@
"type": "github"
}
},
"nixfmt": {
"inputs": {
"flake-utils": "flake-utils"
},
"locked": {
"lastModified": 1736283758,
"narHash": "sha256-hrKhUp2V2fk/dvzTTHFqvtOg000G1e+jyIam+D4XqhA=",
"owner": "NixOS",
"repo": "nixfmt",
"rev": "8d4bd690c247004d90d8554f0b746b1231fe2436",
"type": "github"
},
"original": {
"owner": "NixOS",
"repo": "nixfmt",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1734359947,
@ -150,26 +114,10 @@
"flake-compat": "flake-compat",
"flake-parts": "flake-parts",
"git-hooks-nix": "git-hooks-nix",
"nixfmt": "nixfmt",
"nixpkgs": "nixpkgs",
"nixpkgs-23-11": "nixpkgs-23-11",
"nixpkgs-regression": "nixpkgs-regression"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
}
},
"root": "root",

View file

@ -20,7 +20,6 @@
# work around 7730 and https://github.com/NixOS/nix/issues/7807
inputs.git-hooks-nix.inputs.flake-compat.follows = "";
inputs.git-hooks-nix.inputs.gitignore.follows = "";
inputs.nixfmt.url = "github:NixOS/nixfmt";
outputs =
inputs@{
@ -155,6 +154,7 @@
f = import ./packaging/components.nix {
inherit (final) lib;
inherit officialRelease;
pkgs = final;
src = self;
};
};
@ -224,6 +224,30 @@
LANG=C.UTF-8 ${pkgs.changelog-d}/bin/changelog-d ${./doc/manual/rl-next} >$out
'';
repl-completion = nixpkgsFor.${system}.native.callPackage ./tests/repl-completion.nix { };
/**
Checks for our packaging expressions.
This shouldn't build anything significant; just check that things
(including derivations) are _set up_ correctly.
*/
packaging-overriding =
let
pkgs = nixpkgsFor.${system}.native;
nix = self.packages.${system}.nix;
in
assert (nix.appendPatches [ pkgs.emptyFile ]).libs.nix-util.src.patches == [ pkgs.emptyFile ];
if pkgs.stdenv.buildPlatform.isDarwin then
lib.warn "packaging-overriding check currently disabled because of a permissions issue on macOS" pkgs.emptyFile
else
# If this fails, something might be wrong with how we've wired the scope,
# or something could be broken in Nixpkgs.
pkgs.testers.testEqualContents {
assertion = "trivial patch does not change source contents";
expected = "${./.}";
actual =
# Same for all components; nix-util is an arbitrary pick
(nix.appendPatches [ pkgs.emptyFile ]).libs.nix-util.src;
};
}
// (lib.optionalAttrs (builtins.elem system linux64BitSystems)) {
dockerImage = self.hydraJobs.dockerImage.${system};
@ -378,7 +402,7 @@
devShells =
let
makeShell = import ./packaging/dev-shell.nix { inherit inputs lib devFlake; };
makeShell = import ./packaging/dev-shell.nix { inherit lib devFlake; };
prefixAttrs = prefix: lib.concatMapAttrs (k: v: { "${prefix}-${k}" = v; });
in
forAllSystems (

View file

@ -39,7 +39,6 @@
};
nixfmt-rfc-style = {
enable = true;
package = inputs.nixfmt.packages.${pkgs.hostPlatform.system}.default;
excludes = [
# Invalid
''^tests/functional/lang/parse-.*\.nix$''

View file

@ -1,5 +1,6 @@
{
lib,
pkgs,
src,
officialRelease,
}:
@ -7,7 +8,23 @@
scope:
let
inherit (scope) callPackage;
inherit (scope)
callPackage
;
inherit
(scope.callPackage (
{ stdenv }:
{
inherit stdenv;
}
) { })
stdenv
;
inherit (pkgs.buildPackages)
meson
ninja
pkg-config
;
baseVersion = lib.fileContents ../.version;
@ -20,6 +37,165 @@ let
}_${src.shortRev or "dirty"}";
fineVersion = baseVersion + fineVersionSuffix;
root = ../.;
# Indirection for Nixpkgs to override when package.nix files are vendored
filesetToSource = lib.fileset.toSource;
/**
Given a set of layers, create a mkDerivation-like function
*/
mkPackageBuilder =
exts: userFn: stdenv.mkDerivation (lib.extends (lib.composeManyExtensions exts) userFn);
setVersionLayer = finalAttrs: prevAttrs: {
preConfigure =
prevAttrs.preConfigure or ""
+
# Update the repo-global .version file.
# Symlink ./.version points there, but by default only workDir is writable.
''
chmod u+w ./.version
echo ${finalAttrs.version} > ./.version
'';
};
localSourceLayer =
finalAttrs: prevAttrs:
let
workDirPath =
# Ideally we'd pick finalAttrs.workDir, but for now `mkDerivation` has
# the requirement that everything except passthru and meta must be
# serialized by mkDerivation, which doesn't work for this.
prevAttrs.workDir;
workDirSubpath = lib.path.removePrefix root workDirPath;
sources =
assert prevAttrs.fileset._type == "fileset";
prevAttrs.fileset;
src = lib.fileset.toSource {
fileset = sources;
inherit root;
};
in
{
sourceRoot = "${src.name}/" + workDirSubpath;
inherit src;
# Clear what `derivation` can't/shouldn't serialize; see prevAttrs.workDir.
fileset = null;
workDir = null;
};
resolveRelPath = p: lib.path.removePrefix root p;
makeFetchedSourceLayer =
finalScope: finalAttrs: prevAttrs:
let
workDirPath =
# Ideally we'd pick finalAttrs.workDir, but for now `mkDerivation` has
# the requirement that everything except passthru and meta must be
# serialized by mkDerivation, which doesn't work for this.
prevAttrs.workDir;
workDirSubpath = resolveRelPath workDirPath;
in
{
sourceRoot = "${finalScope.patchedSrc.name}/" + workDirSubpath;
src = finalScope.patchedSrc;
version =
let
n = lib.length finalScope.patches;
in
if n == 0 then finalAttrs.version else finalAttrs.version + "+${toString n}";
# Clear what `derivation` can't/shouldn't serialize; see prevAttrs.workDir.
fileset = null;
workDir = null;
};
mesonLayer = finalAttrs: prevAttrs: {
# NOTE:
# As of https://github.com/NixOS/nixpkgs/blob/8baf8241cea0c7b30e0b8ae73474cb3de83c1a30/pkgs/by-name/me/meson/setup-hook.sh#L26,
# `mesonBuildType` defaults to `plain` if not specified. We want our Nix-built binaries to be optimized by default.
# More on build types here: https://mesonbuild.com/Builtin-options.html#details-for-buildtype.
mesonBuildType = "release";
# NOTE:
# Users who are debugging Nix builds are expected to set the environment variable `mesonBuildType`, per the
# guidance in https://github.com/NixOS/nix/blob/8a3fc27f1b63a08ac983ee46435a56cf49ebaf4a/doc/manual/source/development/debugging.md?plain=1#L10.
# For this reason, we don't want to refer to `finalAttrs.mesonBuildType` here, but rather use the environment variable.
preConfigure =
prevAttrs.preConfigure or ""
+
lib.optionalString
(
!stdenv.hostPlatform.isWindows
# build failure
&& !stdenv.hostPlatform.isStatic
# LTO breaks exception handling on x86-64-darwin.
&& stdenv.system != "x86_64-darwin"
)
''
case "$mesonBuildType" in
release|minsize) appendToVar mesonFlags "-Db_lto=true" ;;
*) appendToVar mesonFlags "-Db_lto=false" ;;
esac
'';
nativeBuildInputs = [
meson
ninja
] ++ prevAttrs.nativeBuildInputs or [ ];
mesonCheckFlags = prevAttrs.mesonCheckFlags or [ ] ++ [
"--print-errorlogs"
];
};
mesonBuildLayer = finalAttrs: prevAttrs: {
nativeBuildInputs = prevAttrs.nativeBuildInputs or [ ] ++ [
pkg-config
];
separateDebugInfo = !stdenv.hostPlatform.isStatic;
hardeningDisable = lib.optional stdenv.hostPlatform.isStatic "pie";
env =
prevAttrs.env or { }
// lib.optionalAttrs (
stdenv.isLinux
&& !(stdenv.hostPlatform.isStatic && stdenv.system == "aarch64-linux")
&& !(stdenv.hostPlatform.useLLVM or false)
) { LDFLAGS = "-fuse-ld=gold"; };
};
mesonLibraryLayer = finalAttrs: prevAttrs: {
outputs = prevAttrs.outputs or [ "out" ] ++ [ "dev" ];
};
# Work around weird `--as-needed` linker behavior with BSD, see
# https://github.com/mesonbuild/meson/issues/3593
bsdNoLinkAsNeeded =
finalAttrs: prevAttrs:
lib.optionalAttrs stdenv.hostPlatform.isBSD {
mesonFlags = [ (lib.mesonBool "b_asneeded" false) ] ++ prevAttrs.mesonFlags or [ ];
};
miscGoodPractice = finalAttrs: prevAttrs: {
strictDeps = prevAttrs.strictDeps or true;
enableParallelBuilding = true;
};
/**
Append patches to the source layer.
*/
appendPatches =
scope: patches:
scope.overrideScope (
finalScope: prevScope: {
patches = prevScope.patches ++ patches;
}
);
in
# This becomes the pkgs.nixComponents attribute set
@ -27,6 +203,110 @@ in
version = baseVersion + versionSuffix;
inherit versionSuffix;
inherit filesetToSource;
/**
A user-provided extension function to apply to each component derivation.
*/
mesonComponentOverrides = finalAttrs: prevAttrs: { };
/**
An overridable derivation layer for handling the sources.
*/
sourceLayer = localSourceLayer;
/**
Resolve a path value to either itself or a path in the `src`, depending
whether `overrideSource` was called.
*/
resolvePath = p: p;
/**
Apply an extension function (i.e. overlay-shaped) to all component derivations.
*/
overrideAllMesonComponents =
f:
scope.overrideScope (
finalScope: prevScope: {
mesonComponentOverrides = lib.composeExtensions scope.mesonComponentOverrides f;
}
);
/**
Provide an alternate source. This allows the expressions to be vendored without copying the sources,
but it does make the build non-granular; all components will use a complete source.
Packaging expressions will be ignored.
*/
overrideSource =
src:
scope.overrideScope (
finalScope: prevScope: {
sourceLayer = makeFetchedSourceLayer finalScope;
/**
Unpatched source for the build of Nix. Packaging expressions will be ignored.
*/
src = src;
/**
Patches for the whole Nix source. Changes to packaging expressions will be ignored.
*/
patches = [ ];
/**
Fetched and patched source to be used in component derivations.
*/
patchedSrc =
if finalScope.patches == [ ] then
src
else
pkgs.buildPackages.srcOnly (
pkgs.buildPackages.stdenvNoCC.mkDerivation {
name = "${finalScope.src.name or "nix-source"}-patched";
inherit (finalScope) src patches;
}
);
resolvePath = p: finalScope.patchedSrc + "/${resolveRelPath p}";
appendPatches = appendPatches finalScope;
}
);
/**
Append patches to be applied to the whole Nix source.
This affects all components.
Changes to the packaging expressions will be ignored.
*/
appendPatches =
patches:
# switch to "fetched" source first, so that patches apply to the whole tree.
(scope.overrideSource "${./..}").appendPatches patches;
mkMesonDerivation = mkPackageBuilder [
miscGoodPractice
scope.sourceLayer
setVersionLayer
mesonLayer
scope.mesonComponentOverrides
];
mkMesonExecutable = mkPackageBuilder [
miscGoodPractice
bsdNoLinkAsNeeded
scope.sourceLayer
setVersionLayer
mesonLayer
mesonBuildLayer
scope.mesonComponentOverrides
];
mkMesonLibrary = mkPackageBuilder [
miscGoodPractice
bsdNoLinkAsNeeded
scope.sourceLayer
mesonLayer
setVersionLayer
mesonBuildLayer
mesonLibraryLayer
scope.mesonComponentOverrides
];
nix-util = callPackage ../src/libutil/package.nix { };
nix-util-c = callPackage ../src/libutil-c/package.nix { };
nix-util-test-support = callPackage ../src/libutil-test-support/package.nix { };
@ -66,5 +346,33 @@ in
nix-perl-bindings = callPackage ../src/perl/package.nix { };
nix-everything = callPackage ../packaging/everything.nix { };
nix-everything = callPackage ../packaging/everything.nix { } // {
# Note: no `passthru.overrideAllMesonComponents`
# This would propagate into `nix.overrideAttrs f`, but then discard
# `f` when `.overrideAllMesonComponents` is used.
# Both "methods" should be views on the same fixpoint overriding mechanism
# for that to work. For now, we intentionally don't support the broken
# two-fixpoint solution.
/**
Apply an extension function (i.e. overlay-shaped) to all component derivations, and return the nix package.
*/
overrideAllMesonComponents = f: (scope.overrideAllMesonComponents f).nix-everything;
/**
Append patches to be applied to the whole Nix source.
This affects all components.
Changes to the packaging expressions will be ignored.
*/
appendPatches = ps: (scope.appendPatches ps).nix-everything;
/**
Provide an alternate source. This allows the expressions to be vendored without copying the sources,
but it does make the build non-granular; all components will use a complete source.
Packaging expressions will be ignored.
*/
overrideSource = src: (scope.overrideSource src).nix-everything;
};
}
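As a rough sketch of how the passthru helpers above compose for a downstream user (`nix` stands for the `nix-everything` package from this scope; the patch path and the override body are placeholders):
```nix
let
  # Apply a patch to the whole Nix source tree; every component rebuilds from it.
  patched = nix.appendPatches [ ./my-change.patch ];
in
{
  # The patch shows up on every component's source; nix-util is an arbitrary
  # pick, mirroring the flake's packaging-overriding check.
  patchesSeenByComponent = patched.libs.nix-util.src.patches;

  # Overlay-shaped tweak applied to every Meson component derivation.
  debugBuild = nix.overrideAllMesonComponents (finalAttrs: prevAttrs: {
    mesonBuildType = "debug";
  });
}
```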

View file

@ -17,8 +17,6 @@ in
let
inherit (pkgs) lib;
root = ../.;
stdenv = if prevStdenv.isDarwin && prevStdenv.isx86_64 then darwinStdenv else prevStdenv;
# Fix the following error with the default x86_64-darwin SDK:
@ -30,113 +28,6 @@ let
# all the way back to 10.6.
darwinStdenv = pkgs.overrideSDK prevStdenv { darwinMinVersion = "10.13"; };
# Nixpkgs implements this by returning a subpath into the fetched Nix sources.
resolvePath = p: p;
# Indirection for Nixpkgs to override when package.nix files are vendored
filesetToSource = lib.fileset.toSource;
/**
Given a set of layers, create a mkDerivation-like function
*/
mkPackageBuilder =
exts: userFn: stdenv.mkDerivation (lib.extends (lib.composeManyExtensions exts) userFn);
localSourceLayer =
finalAttrs: prevAttrs:
let
workDirPath =
# Ideally we'd pick finalAttrs.workDir, but for now `mkDerivation` has
# the requirement that everything except passthru and meta must be
# serialized by mkDerivation, which doesn't work for this.
prevAttrs.workDir;
workDirSubpath = lib.path.removePrefix root workDirPath;
sources =
assert prevAttrs.fileset._type == "fileset";
prevAttrs.fileset;
src = lib.fileset.toSource {
fileset = sources;
inherit root;
};
in
{
sourceRoot = "${src.name}/" + workDirSubpath;
inherit src;
# Clear what `derivation` can't/shouldn't serialize; see prevAttrs.workDir.
fileset = null;
workDir = null;
};
mesonLayer = finalAttrs: prevAttrs: {
# NOTE:
# As of https://github.com/NixOS/nixpkgs/blob/8baf8241cea0c7b30e0b8ae73474cb3de83c1a30/pkgs/by-name/me/meson/setup-hook.sh#L26,
# `mesonBuildType` defaults to `plain` if not specified. We want our Nix-built binaries to be optimized by default.
# More on build types here: https://mesonbuild.com/Builtin-options.html#details-for-buildtype.
mesonBuildType = "release";
# NOTE:
# Users who are debugging Nix builds are expected to set the environment variable `mesonBuildType`, per the
# guidance in https://github.com/NixOS/nix/blob/8a3fc27f1b63a08ac983ee46435a56cf49ebaf4a/doc/manual/source/development/debugging.md?plain=1#L10.
# For this reason, we don't want to refer to `finalAttrs.mesonBuildType` here, but rather use the environment variable.
preConfigure =
prevAttrs.preConfigure or ""
+
lib.optionalString
(
!stdenv.hostPlatform.isWindows
# build failure
&& !stdenv.hostPlatform.isStatic
# LTO breaks exception handling on x86-64-darwin.
&& stdenv.system != "x86_64-darwin"
)
''
case "$mesonBuildType" in
release|minsize) appendToVar mesonFlags "-Db_lto=true" ;;
*) appendToVar mesonFlags "-Db_lto=false" ;;
esac
'';
nativeBuildInputs = [
pkgs.buildPackages.meson
pkgs.buildPackages.ninja
] ++ prevAttrs.nativeBuildInputs or [ ];
mesonCheckFlags = prevAttrs.mesonCheckFlags or [ ] ++ [
"--print-errorlogs"
];
};
mesonBuildLayer = finalAttrs: prevAttrs: {
nativeBuildInputs = prevAttrs.nativeBuildInputs or [ ] ++ [
pkgs.buildPackages.pkg-config
];
separateDebugInfo = !stdenv.hostPlatform.isStatic;
hardeningDisable = lib.optional stdenv.hostPlatform.isStatic "pie";
env =
prevAttrs.env or { }
// lib.optionalAttrs (
stdenv.isLinux
&& !(stdenv.hostPlatform.isStatic && stdenv.system == "aarch64-linux")
&& !(stdenv.hostPlatform.useLLVM or false)
) { LDFLAGS = "-fuse-ld=gold"; };
};
mesonLibraryLayer = finalAttrs: prevAttrs: {
outputs = prevAttrs.outputs or [ "out" ] ++ [ "dev" ];
};
# Work around weird `--as-needed` linker behavior with BSD, see
# https://github.com/mesonbuild/meson/issues/3593
bsdNoLinkAsNeeded =
finalAttrs: prevAttrs:
lib.optionalAttrs stdenv.hostPlatform.isBSD {
mesonFlags = [ (lib.mesonBool "b_asneeded" false) ] ++ prevAttrs.mesonFlags or [ ];
};
miscGoodPractice = finalAttrs: prevAttrs: {
strictDeps = prevAttrs.strictDeps or true;
enableParallelBuilding = true;
};
in
scope: {
inherit stdenv;
@ -174,56 +65,39 @@ scope: {
installPhase = lib.replaceStrings [ "--without-python" ] [ "" ] old.installPhase;
});
libgit2 = pkgs.libgit2.overrideAttrs (attrs: {
cmakeFlags = attrs.cmakeFlags or [ ] ++ [ "-DUSE_SSH=exec" ];
nativeBuildInputs =
attrs.nativeBuildInputs or [ ]
# gitMinimal does not build on Windows. See packbuilder patch.
++ lib.optionals (!stdenv.hostPlatform.isWindows) [
# Needed for `git apply`; see `prePatch`
pkgs.buildPackages.gitMinimal
];
# Only `git apply` can handle git binary patches
prePatch =
attrs.prePatch or ""
+ lib.optionalString (!stdenv.hostPlatform.isWindows) ''
patch() {
git apply
}
'';
patches =
attrs.patches or [ ]
++ [
./patches/libgit2-mempack-thin-packfile.patch
]
# gitMinimal does not build on Windows, but fortunately this patch only
# impacts interruptibility
++ lib.optionals (!stdenv.hostPlatform.isWindows) [
# binary patch; see `prePatch`
./patches/libgit2-packbuilder-callback-interruptible.patch
];
});
inherit resolvePath filesetToSource;
mkMesonDerivation = mkPackageBuilder [
miscGoodPractice
localSourceLayer
mesonLayer
];
mkMesonExecutable = mkPackageBuilder [
miscGoodPractice
bsdNoLinkAsNeeded
localSourceLayer
mesonLayer
mesonBuildLayer
];
mkMesonLibrary = mkPackageBuilder [
miscGoodPractice
bsdNoLinkAsNeeded
localSourceLayer
mesonLayer
mesonBuildLayer
mesonLibraryLayer
];
libgit2 = pkgs.libgit2.overrideAttrs (
attrs:
{
cmakeFlags = attrs.cmakeFlags or [ ] ++ [ "-DUSE_SSH=exec" ];
}
# libgit2: Nixpkgs 24.11 has < 1.9.0, which needs our patches
// lib.optionalAttrs (!lib.versionAtLeast pkgs.libgit2.version "1.9.0") {
nativeBuildInputs =
attrs.nativeBuildInputs or [ ]
# gitMinimal does not build on Windows. See packbuilder patch.
++ lib.optionals (!stdenv.hostPlatform.isWindows) [
# Needed for `git apply`; see `prePatch`
pkgs.buildPackages.gitMinimal
];
# Only `git apply` can handle git binary patches
prePatch =
attrs.prePatch or ""
+ lib.optionalString (!stdenv.hostPlatform.isWindows) ''
patch() {
git apply
}
'';
patches =
attrs.patches or [ ]
++ [
./patches/libgit2-mempack-thin-packfile.patch
]
# gitMinimal does not build on Windows, but fortunately this patch only
# impacts interruptibility
++ lib.optionals (!stdenv.hostPlatform.isWindows) [
# binary patch; see `prePatch`
./patches/libgit2-packbuilder-callback-interruptible.patch
];
}
);
}

View file

@ -1,6 +1,5 @@
{
lib,
inputs,
devFlake,
}:
@ -117,7 +116,7 @@ pkgs.nixComponents.nix-util.overrideAttrs (
pkgs.buildPackages.changelog-d
modular.pre-commit.settings.package
(pkgs.writeScriptBin "pre-commit-hooks-install" modular.pre-commit.settings.installationScript)
inputs.nixfmt.packages.${pkgs.hostPlatform.system}.default
pkgs.buildPackages.nixfmt-rfc-style
]
# TODO: Remove the darwin check once
# https://github.com/NixOS/nixpkgs/pull/291814 is available

View file

@ -51,7 +51,7 @@ static bool allSupportedLocally(Store & store, const std::set<std::string>& requ
static int main_build_remote(int argc, char * * argv)
{
{
logger = makeJSONLogger(*logger);
logger = makeJSONLogger(getStandardError());
/* Ensure we don't get any SSH passphrase or host key popups. */
unsetenv("DISPLAY");

View file

@ -37,7 +37,7 @@ EvalSettings evalSettings {
auto [accessor, lockedRef] = flakeRef.resolve(state.store).lazyFetch(state.store);
auto storePath = nix::fetchToStore(*state.store, SourcePath(accessor), FetchMode::Copy, lockedRef.input.getName());
state.allowPath(storePath);
return state.rootPath(state.store->toRealPath(storePath));
return state.storePath(storePath);
},
},
},
@ -179,7 +179,7 @@ SourcePath lookupFileArg(EvalState & state, std::string_view s, const Path * bas
state.fetchSettings,
EvalSettings::resolvePseudoUrl(s));
auto storePath = fetchToStore(*state.store, SourcePath(accessor), FetchMode::Copy);
return state.rootPath(CanonPath(state.store->toRealPath(storePath)));
return state.storePath(storePath);
}
else if (hasPrefix(s, "flake:")) {
@ -188,7 +188,7 @@ SourcePath lookupFileArg(EvalState & state, std::string_view s, const Path * bas
auto [accessor, lockedRef] = flakeRef.resolve(state.store).lazyFetch(state.store);
auto storePath = nix::fetchToStore(*state.store, SourcePath(accessor), FetchMode::Copy, lockedRef.input.getName());
state.allowPath(storePath);
return state.rootPath(CanonPath(state.store->toRealPath(storePath)));
return state.storePath(storePath);
}
else if (s.size() > 2 && s.at(0) == '<' && s.at(s.size() - 1) == '>') {

View file

@ -64,14 +64,6 @@ mkMesonLibrary (finalAttrs: {
nlohmann_json
];
preConfigure =
# "Inline" .version so it's not a symlink, and includes the suffix.
# Do the meson utils, without modification.
''
chmod u+w ./.version
echo ${version} > ../../.version
'';
mesonFlags = [
(lib.mesonEnable "markdown" enableMarkdown)
(lib.mesonOption "readline-flavor" readlineFlavor)

View file

@ -101,6 +101,9 @@ struct NixRepl
Value & v,
unsigned int maxDepth = std::numeric_limits<unsigned int>::max())
{
// Hide the progress bar during printing because it might interfere
logger->pause();
Finally resumeLoggerDefer([]() { logger->resume(); });
::nix::printValue(*state, str, v, PrintOptions {
.ansiColors = true,
.force = true,

View file

@ -36,14 +36,6 @@ mkMesonLibrary (finalAttrs: {
nix-expr
];
preConfigure =
# "Inline" .version so it's not a symlink, and includes the suffix.
# Do the meson utils, without modification.
''
chmod u+w ./.version
echo ${version} > ../../.version
'';
mesonFlags = [
];

View file

@ -40,14 +40,6 @@ mkMesonLibrary (finalAttrs: {
rapidcheck
];
preConfigure =
# "Inline" .version so it's not a symlink, and includes the suffix.
# Do the meson utils, without modification.
''
chmod u+w ./.version
echo ${version} > ../../.version
'';
mesonFlags = [
];

View file

@ -46,14 +46,6 @@ mkMesonExecutable (finalAttrs: {
gtest
];
preConfigure =
# "Inline" .version so it's not a symlink, and includes the suffix.
# Do the meson utils, without modification.
''
chmod u+w ./.version
echo ${version} > ../../.version
'';
mesonFlags = [
];

View file

@ -28,20 +28,15 @@ namespace nix {
};
class CaptureLogging {
Logger * oldLogger;
std::unique_ptr<CaptureLogger> tempLogger;
std::unique_ptr<Logger> oldLogger;
public:
CaptureLogging() : tempLogger(std::make_unique<CaptureLogger>()) {
oldLogger = logger;
logger = tempLogger.get();
CaptureLogging() {
oldLogger = std::move(logger);
logger = std::make_unique<CaptureLogger>();
}
~CaptureLogging() {
logger = oldLogger;
}
std::string get() const {
return tempLogger->get();
logger = std::move(oldLogger);
}
};
@ -113,7 +108,7 @@ namespace nix {
CaptureLogging l;
auto v = eval("builtins.trace \"test string 123\" 123");
ASSERT_THAT(v, IsIntEq(123));
auto text = l.get();
auto text = (dynamic_cast<CaptureLogger *>(logger.get()))->get();
ASSERT_NE(text.find("test string 123"), std::string::npos);
}

View file

@ -57,7 +57,7 @@ Strings EvalSettings::getDefaultNixPath()
{
Strings res;
auto add = [&](const Path & p, const std::string & s = std::string()) {
if (pathAccessible(p)) {
if (std::filesystem::exists(p)) {
if (s.empty()) {
res.push_back(p);
} else {

View file

@ -2,7 +2,6 @@
///@file
#include "config.hh"
#include "ref.hh"
#include "source-path.hh"
namespace nix {

View file

@ -246,15 +246,42 @@ EvalState::EvalState(
, repair(NoRepair)
, emptyBindings(0)
, rootFS(
settings.restrictEval || settings.pureEval
? ref<SourceAccessor>(AllowListSourceAccessor::create(getFSSourceAccessor(), {},
[&settings](const CanonPath & path) -> RestrictedPathError {
auto modeInformation = settings.pureEval
? "in pure evaluation mode (use '--impure' to override)"
: "in restricted mode";
throw RestrictedPathError("access to absolute path '%1%' is forbidden %2%", path, modeInformation);
}))
: getFSSourceAccessor())
({
/* In pure eval mode, we provide a filesystem that only
contains the Nix store.
If we have a chroot store and pure eval is not enabled,
use a union accessor to make the chroot store available
at its logical location while still having the
underlying directory available. This is necessary for
instance if we're evaluating a file from the physical
/nix/store while using a chroot store. */
auto accessor = getFSSourceAccessor();
auto realStoreDir = dirOf(store->toRealPath(StorePath::dummy));
if (settings.pureEval || store->storeDir != realStoreDir) {
auto storeFS = makeMountedSourceAccessor(
{
{CanonPath::root, makeEmptySourceAccessor()},
{CanonPath(store->storeDir), makeFSSourceAccessor(realStoreDir)}
});
accessor = settings.pureEval
? storeFS
: makeUnionSourceAccessor({accessor, storeFS});
}
/* Apply access control if needed. */
if (settings.restrictEval || settings.pureEval)
accessor = AllowListSourceAccessor::create(accessor, {},
[&settings](const CanonPath & path) -> RestrictedPathError {
auto modeInformation = settings.pureEval
? "in pure evaluation mode (use '--impure' to override)"
: "in restricted mode";
throw RestrictedPathError("access to absolute path '%1%' is forbidden %2%", path, modeInformation);
});
accessor;
}))
, corepkgsFS(make_ref<MemorySourceAccessor>())
, internalFS(make_ref<MemorySourceAccessor>())
, derivationInternal{corepkgsFS->addFile(
@ -344,7 +371,7 @@ void EvalState::allowPath(const Path & path)
void EvalState::allowPath(const StorePath & storePath)
{
if (auto rootFS2 = rootFS.dynamic_pointer_cast<AllowListSourceAccessor>())
rootFS2->allowPrefix(CanonPath(store->toRealPath(storePath)));
rootFS2->allowPrefix(CanonPath(store->printStorePath(storePath)));
}
void EvalState::allowClosure(const StorePath & storePath)
@ -422,16 +449,6 @@ void EvalState::checkURI(const std::string & uri)
}
Path EvalState::toRealPath(const Path & path, const NixStringContext & context)
{
// FIXME: check whether 'path' is in 'context'.
return
!context.empty() && store->isInStore(path)
? store->toRealPath(path)
: path;
}
Value * EvalState::addConstant(const std::string & name, Value & v, Constant info)
{
Value * v2 = allocValue();
@ -2051,7 +2068,7 @@ void ExprConcatStrings::eval(EvalState & state, Env & env, Value & v)
else if (firstType == nPath) {
if (!context.empty())
state.error<EvalError>("a string that refers to a store path cannot be appended to a path").atPos(pos).withFrame(env, *this).debugThrow();
v.mkPath(state.rootPath(CanonPath(canonPath(str()))));
v.mkPath(state.rootPath(CanonPath(str())));
} else
v.mkStringMove(c_str(), context);
}
@ -2432,7 +2449,7 @@ SourcePath EvalState::coerceToPath(const PosIdx pos, Value & v, NixStringContext
auto path = coerceToString(pos, v, context, errorCtx, false, false, true).toOwned();
if (path == "" || path[0] != '/')
error<EvalError>("string '%1%' doesn't represent an absolute path", path).withTrace(pos, errorCtx).debugThrow();
return rootPath(CanonPath(path));
return rootPath(path);
}
@ -3070,8 +3087,11 @@ std::optional<SourcePath> EvalState::resolveLookupPathPath(const LookupPath::Pat
auto i = lookupPathResolved.find(value);
if (i != lookupPathResolved.end()) return i->second;
auto finish = [&](SourcePath res) {
debug("resolved search path element '%s' to '%s'", value, res);
auto finish = [&](std::optional<SourcePath> res) {
if (res)
debug("resolved search path element '%s' to '%s'", value, *res);
else
debug("failed to resolve search path element '%s'", value);
lookupPathResolved.emplace(value, res);
return res;
};
@ -3083,7 +3103,7 @@ std::optional<SourcePath> EvalState::resolveLookupPathPath(const LookupPath::Pat
fetchSettings,
EvalSettings::resolvePseudoUrl(value));
auto storePath = fetchToStore(*store, SourcePath(accessor), FetchMode::Copy);
return finish(rootPath(store->toRealPath(storePath)));
return finish(this->storePath(storePath));
} catch (Error & e) {
logWarning({
.msg = HintFmt("Nix search path entry '%1%' cannot be downloaded, ignoring", value)
@ -3123,8 +3143,7 @@ std::optional<SourcePath> EvalState::resolveLookupPathPath(const LookupPath::Pat
}
}
debug("failed to resolve search path element '%s'", value);
return std::nullopt;
return finish(std::nullopt);
}

View file

@ -389,6 +389,15 @@ public:
*/
SourcePath rootPath(PathView path);
/**
* Return a `SourcePath` that refers to `path` in the store.
*
* For now, this has to also be within the root filesystem for
* backwards compat, but for Windows and maybe also pure eval, we'll
* probably want to do something different.
*/
SourcePath storePath(const StorePath & path);
/**
* Allow access to a path.
*/
@ -412,17 +421,6 @@ public:
void checkURI(const std::string & uri);
/**
* When using a diverted store and 'path' is in the Nix store, map
* 'path' to the diverted location (e.g. /nix/store/foo is mapped
* to /home/alice/my-nix/nix/store/foo). However, this is only
* done if the context is not empty, since otherwise we're
* probably trying to read from the actual /nix/store. This is
* intended to distinguish between import-from-derivation and
* sources stored in the actual /nix/store.
*/
Path toRealPath(const Path & path, const NixStringContext & context);
/**
* Parse a Nix expression from the specified file.
*/

View file

@ -77,14 +77,6 @@ mkMesonLibrary (finalAttrs: {
nlohmann_json
] ++ lib.optional enableGC boehmgc;
preConfigure =
# "Inline" .version so it's not a symlink, and includes the suffix.
# Do the meson utils, without modification.
''
chmod u+w ./.version
echo ${version} > ../../.version
'';
mesonFlags = [
(lib.mesonEnable "gc" enableGC)
];

View file

@ -1,3 +1,4 @@
#include "store-api.hh"
#include "eval.hh"
namespace nix {
@ -12,4 +13,9 @@ SourcePath EvalState::rootPath(PathView path)
return {rootFS, CanonPath(absPath(path))};
}
SourcePath EvalState::storePath(const StorePath & path)
{
return {rootFS, CanonPath{store->printStorePath(path)}};
}
}

View file

@ -145,8 +145,7 @@ static SourcePath realisePath(EvalState & state, const PosIdx pos, Value & v, st
try {
if (!context.empty() && path.accessor == state.rootFS) {
auto rewrites = state.realiseContext(context);
auto realPath = state.toRealPath(rewriteStrings(path.path.abs(), rewrites), context);
path = {path.accessor, CanonPath(realPath)};
path = {path.accessor, CanonPath(rewriteStrings(path.path.abs(), rewrites))};
}
return resolveSymlinks ? path.resolveSymlinks(*resolveSymlinks) : path;
} catch (Error & e) {
@ -2479,21 +2478,11 @@ static void addPath(
const NixStringContext & context)
{
try {
StorePathSet refs;
if (path.accessor == state.rootFS && state.store->isInStore(path.path.abs())) {
// FIXME: handle CA derivation outputs (where path needs to
// be rewritten to the actual output).
auto rewrites = state.realiseContext(context);
path = {state.rootFS, CanonPath(state.toRealPath(rewriteStrings(path.path.abs(), rewrites), context))};
try {
auto [storePath, subPath] = state.store->toStorePath(path.path.abs());
// FIXME: we should scanForReferences on the path before adding it
refs = state.store->queryPathInfo(storePath)->references;
path = {state.rootFS, CanonPath(state.store->toRealPath(storePath) + subPath)};
} catch (Error &) { // FIXME: should be InvalidPathError
}
path = {path.accessor, CanonPath(rewriteStrings(path.path.abs(), rewrites))};
}
std::unique_ptr<PathFilter> filter;

View file

@ -367,6 +367,12 @@ static RegisterPrimOp primop_fetchTree({
Default: `false`
- `lfs` (Bool, optional)
Fetch any [Git LFS](https://git-lfs.com/) files.
Default: `false`
- `allRefs` (Bool, optional)
By default, this has no effect. This becomes relevant only once `shallow` cloning is disabled.
@ -691,6 +697,13 @@ static RegisterPrimOp primop_fetchGit({
Make a shallow clone when fetching the Git tree.
When this is enabled, the options `ref` and `allRefs` have no effect anymore.
- `lfs` (default: `false`)
A boolean that when `true` specifies that [Git LFS] files should be fetched.
[Git LFS]: https://git-lfs.com/
- `allRefs`
Whether to fetch all references (eg. branches and tags) of the repository.
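For illustration, the new option in expression form (the repository URL is a placeholder; as documented above, `lfs` defaults to `false`):
```nix
# Hypothetical fetchTree call with LFS smudging enabled; builtins.fetchGit
# accepts the same attribute.
builtins.fetchTree {
  type = "git";
  url = "https://example.org/some/repo.git";
  lfs = true;
}
```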

View file

@ -7,13 +7,18 @@
#include <gtest/gtest.h>
#include "fs-sink.hh"
#include "serialise.hh"
#include "git-lfs-fetch.hh"
namespace nix {
namespace fs {
using namespace std::filesystem;
}
class GitUtilsTest : public ::testing::Test
{
// We use a single repository for all tests.
Path tmpDir;
fs::path tmpDir;
std::unique_ptr<AutoDelete> delTmpDir;
public:
@ -41,6 +46,11 @@ public:
{
return GitRepo::openRepo(tmpDir, true, false);
}
std::string getRepoName() const
{
return tmpDir.filename();
}
};
void writeString(CreateRegularFileSink & fileSink, std::string contents, bool executable)
@ -78,7 +88,7 @@ TEST_F(GitUtilsTest, sink_basic)
// sink->createHardlink("foo-1.1/links/foo-2", CanonPath("foo-1.1/hello"));
auto result = repo->dereferenceSingletonDirectory(sink->flush());
auto accessor = repo->getAccessor(result, false);
auto accessor = repo->getAccessor(result, false, getRepoName());
auto entries = accessor->readDirectory(CanonPath::root);
ASSERT_EQ(entries.size(), 5);
ASSERT_EQ(accessor->readFile(CanonPath("hello")), "hello world");

View file

@ -31,6 +31,9 @@ deps_private += rapidcheck
gtest = dependency('gtest', main : true)
deps_private += gtest
libgit2 = dependency('libgit2')
deps_private += libgit2
add_project_arguments(
# TODO(Qyriad): Yes this is how the autoconf+Make system did it.
# It would be nice for our headers to be idempotent instead.
@ -43,8 +46,9 @@ add_project_arguments(
subdir('nix-meson-build-support/common')
sources = files(
'public-key.cc',
'access-tokens.cc',
'git-utils.cc',
'public-key.cc',
)
include_dirs = [include_directories('.')]

View file

@ -7,6 +7,7 @@
nix-fetchers,
nix-store-test-support,
libgit2,
rapidcheck,
gtest,
runCommand,
@ -42,16 +43,9 @@ mkMesonExecutable (finalAttrs: {
nix-store-test-support
rapidcheck
gtest
libgit2
];
preConfigure =
# "Inline" .version so it's not a symlink, and includes the suffix.
# Do the meson utils, without modification.
''
chmod u+w ./.version
echo ${version} > ../../.version
'';
mesonFlags = [
];

View file

@ -0,0 +1,279 @@
#include "git-lfs-fetch.hh"
#include "git-utils.hh"
#include "filetransfer.hh"
#include "processes.hh"
#include "url.hh"
#include "users.hh"
#include "hash.hh"
#include <git2/attr.h>
#include <git2/config.h>
#include <git2/errors.h>
#include <git2/remote.h>
#include <nlohmann/json.hpp>
namespace nix::lfs {
// if authHeader is "", downloadToSink assumes no auth is expected
static void downloadToSink(
const std::string & url,
const std::string & authHeader,
// FIXME: passing a StringSink is superfluous, we may as well
// return a string. Or use an abstract Sink for streaming.
StringSink & sink,
std::string sha256Expected,
size_t sizeExpected)
{
FileTransferRequest request(url);
Headers headers;
if (!authHeader.empty())
headers.push_back({"Authorization", authHeader});
request.headers = headers;
getFileTransfer()->download(std::move(request), sink);
auto sizeActual = sink.s.length();
if (sizeExpected != sizeActual)
throw Error("size mismatch while fetching %s: expected %d but got %d", url, sizeExpected, sizeActual);
auto sha256Actual = hashString(HashAlgorithm::SHA256, sink.s).to_string(HashFormat::Base16, false);
if (sha256Actual != sha256Expected)
throw Error(
"hash mismatch while fetching %s: expected sha256:%s but got sha256:%s", url, sha256Expected, sha256Actual);
}
static std::string getLfsApiToken(const ParsedURL & url)
{
auto [status, output] = runProgram(RunOptions{
.program = "ssh",
.args = {*url.authority, "git-lfs-authenticate", url.path, "download"},
});
if (output.empty())
throw Error(
"git-lfs-authenticate: no output (cmd: ssh %s git-lfs-authenticate %s download)",
url.authority.value_or(""),
url.path);
auto queryResp = nlohmann::json::parse(output);
if (!queryResp.contains("header"))
throw Error("no header in git-lfs-authenticate response");
if (!queryResp["header"].contains("Authorization"))
throw Error("no Authorization in git-lfs-authenticate response");
return queryResp["header"]["Authorization"].get<std::string>();
}
typedef std::unique_ptr<git_config, Deleter<git_config_free>> GitConfig;
typedef std::unique_ptr<git_config_entry, Deleter<git_config_entry_free>> GitConfigEntry;
static std::string getLfsEndpointUrl(git_repository * repo)
{
GitConfig config;
if (git_repository_config(Setter(config), repo)) {
GitConfigEntry entry;
if (!git_config_get_entry(Setter(entry), config.get(), "lfs.url")) {
auto value = std::string(entry->value);
if (!value.empty()) {
debug("Found explicit lfs.url value: %s", value);
return value;
}
}
}
git_remote * remote = nullptr;
if (git_remote_lookup(&remote, repo, "origin"))
return "";
const char * url_c_str = git_remote_url(remote);
if (!url_c_str)
return "";
return std::string(url_c_str);
}
static std::optional<Pointer> parseLfsPointer(std::string_view content, std::string_view filename)
{
// https://github.com/git-lfs/git-lfs/blob/2ef4108/docs/spec.md
//
// example git-lfs pointer file:
// version https://git-lfs.github.com/spec/v1
// oid sha256:f5e02aa71e67f41d79023a128ca35bad86cf7b6656967bfe0884b3a3c4325eaf
// size 10000000
// (ending \n)
if (!content.starts_with("version ")) {
// Invalid pointer file
return std::nullopt;
}
if (!content.starts_with("version https://git-lfs.github.com/spec/v1")) {
// In case there's new spec versions in the future, but for now only v1 exists
debug("Invalid version found on potential lfs pointer file, skipping");
return std::nullopt;
}
std::string oid;
std::string size;
for (auto & line : tokenizeString<Strings>(content, "\n")) {
if (line.starts_with("version ")) {
continue;
}
if (line.starts_with("oid sha256:")) {
oid = line.substr(11); // skip "oid sha256:"
continue;
}
if (line.starts_with("size ")) {
size = line.substr(5); // skip "size "
continue;
}
debug("Custom extension '%s' found, ignoring", line);
}
if (oid.length() != 64 || !std::all_of(oid.begin(), oid.end(), ::isxdigit)) {
debug("Invalid sha256 %s, skipping", oid);
return std::nullopt;
}
if (size.length() == 0 || !std::all_of(size.begin(), size.end(), ::isdigit)) {
debug("Invalid size %s, skipping", size);
return std::nullopt;
}
return std::make_optional(Pointer{oid, std::stoul(size)});
}
Fetch::Fetch(git_repository * repo, git_oid rev)
{
this->repo = repo;
this->rev = rev;
const auto remoteUrl = lfs::getLfsEndpointUrl(repo);
this->url = nix::parseURL(nix::fixGitURL(remoteUrl)).canonicalise();
}
bool Fetch::shouldFetch(const CanonPath & path) const
{
const char * attr = nullptr;
git_attr_options opts = GIT_ATTR_OPTIONS_INIT;
opts.attr_commit_id = this->rev;
opts.flags = GIT_ATTR_CHECK_INCLUDE_COMMIT | GIT_ATTR_CHECK_NO_SYSTEM;
if (git_attr_get_ext(&attr, (git_repository *) (this->repo), &opts, path.rel_c_str(), "filter"))
throw Error("cannot get git-lfs attribute: %s", git_error_last()->message);
debug("Git filter for '%s' is '%s'", path, attr ? attr : "null");
return attr != nullptr && !std::string(attr).compare("lfs");
}
static nlohmann::json pointerToPayload(const std::vector<Pointer> & items)
{
nlohmann::json jArray = nlohmann::json::array();
for (const auto & pointer : items)
jArray.push_back({{"oid", pointer.oid}, {"size", pointer.size}});
return jArray;
}
std::vector<nlohmann::json> Fetch::fetchUrls(const std::vector<Pointer> & pointers) const
{
ParsedURL httpUrl(url);
httpUrl.scheme = url.scheme == "ssh" ? "https" : url.scheme;
FileTransferRequest request(httpUrl.to_string() + "/info/lfs/objects/batch");
request.post = true;
Headers headers;
if (this->url.scheme == "ssh")
headers.push_back({"Authorization", lfs::getLfsApiToken(this->url)});
headers.push_back({"Content-Type", "application/vnd.git-lfs+json"});
headers.push_back({"Accept", "application/vnd.git-lfs+json"});
request.headers = headers;
nlohmann::json oidList = pointerToPayload(pointers);
nlohmann::json data = {{"operation", "download"}};
data["objects"] = oidList;
request.data = data.dump();
FileTransferResult result = getFileTransfer()->upload(request);
auto responseString = result.data;
std::vector<nlohmann::json> objects;
// example resp here:
// {"objects":[{"oid":"f5e02aa71e67f41d79023a128ca35bad86cf7b6656967bfe0884b3a3c4325eaf","size":10000000,"actions":{"download":{"href":"https://gitlab.com/b-camacho/test-lfs.git/gitlab-lfs/objects/f5e02aa71e67f41d79023a128ca35bad86cf7b6656967bfe0884b3a3c4325eaf","header":{"Authorization":"Basic
// Yi1jYW1hY2hvOmV5SjBlWEFpT2lKS1YxUWlMQ0poYkdjaU9pSklVekkxTmlKOS5leUprWVhSaElqcDdJbUZqZEc5eUlqb2lZaTFqWVcxaFkyaHZJbjBzSW1wMGFTSTZJbUptTURZNFpXVTFMVEprWmpVdE5HWm1ZUzFpWWpRMExUSXpNVEV3WVRReU1qWmtaaUlzSW1saGRDSTZNVGN4TkRZeE16ZzBOU3dpYm1KbUlqb3hOekUwTmpFek9EUXdMQ0psZUhBaU9qRTNNVFEyTWpFd05EVjkuZk9yMDNkYjBWSTFXQzFZaTBKRmJUNnJTTHJPZlBwVW9lYllkT0NQZlJ4QQ=="}}},"authenticated":true}]}
try {
auto resp = nlohmann::json::parse(responseString);
if (resp.contains("objects"))
objects.insert(objects.end(), resp["objects"].begin(), resp["objects"].end());
else
throw Error("response does not contain 'objects'");
return objects;
} catch (const nlohmann::json::parse_error & e) {
printMsg(lvlTalkative, "Full response: '%1%'", responseString);
throw Error("response did not parse as json: %s", e.what());
}
}
void Fetch::fetch(
const std::string & content,
const CanonPath & pointerFilePath,
StringSink & sink,
std::function<void(uint64_t)> sizeCallback) const
{
debug("trying to fetch '%s' using git-lfs", pointerFilePath);
if (content.length() >= 1024) {
warn("encountered file '%s' that should have been a git-lfs pointer, but is too large", pointerFilePath);
sizeCallback(content.length());
sink(content);
return;
}
const auto pointer = parseLfsPointer(content, pointerFilePath.rel());
if (pointer == std::nullopt) {
warn("encountered file '%s' that should have been a git-lfs pointer, but is invalid", pointerFilePath);
sizeCallback(content.length());
sink(content);
return;
}
Path cacheDir = getCacheDir() + "/git-lfs";
std::string key = hashString(HashAlgorithm::SHA256, pointerFilePath.rel()).to_string(HashFormat::Base16, false)
+ "/" + pointer->oid;
Path cachePath = cacheDir + "/" + key;
if (pathExists(cachePath)) {
debug("using cache entry %s -> %s", key, cachePath);
sink(readFile(cachePath));
return;
}
debug("did not find cache entry for %s", key);
std::vector<Pointer> pointers;
pointers.push_back(pointer.value());
const auto objUrls = fetchUrls(pointers);
const auto obj = objUrls[0];
try {
std::string sha256 = obj.at("oid"); // oid is also the sha256
std::string ourl = obj.at("actions").at("download").at("href");
std::string authHeader = "";
if (obj.at("actions").at("download").contains("header")
&& obj.at("actions").at("download").at("header").contains("Authorization")) {
authHeader = obj["actions"]["download"]["header"]["Authorization"];
}
const uint64_t size = obj.at("size");
sizeCallback(size);
downloadToSink(ourl, authHeader, sink, sha256, size);
debug("creating cache entry %s -> %s", key, cachePath);
if (!pathExists(dirOf(cachePath)))
createDirs(dirOf(cachePath));
writeFile(cachePath, sink.s);
debug("%s fetched with git-lfs", pointerFilePath);
} catch (const nlohmann::json::out_of_range & e) {
throw Error("bad json from /info/lfs/objects/batch: %s %s", obj, e.what());
}
}
} // namespace nix::lfs

View file

@ -0,0 +1,43 @@
#include "canon-path.hh"
#include "serialise.hh"
#include "url.hh"
#include <git2/repository.h>
#include <nlohmann/json_fwd.hpp>
namespace nix::lfs {
/**
* git-lfs pointer
* @see https://github.com/git-lfs/git-lfs/blob/2ef4108/docs/spec.md
*/
struct Pointer
{
std::string oid; // git-lfs managed object id. you give this to the lfs server
// for downloads
size_t size; // in bytes
};
struct Fetch
{
// Reference to the repository
const git_repository * repo;
// Git commit being fetched
git_oid rev;
// derived from git remote url
nix::ParsedURL url;
Fetch(git_repository * repo, git_oid rev);
bool shouldFetch(const CanonPath & path) const;
void fetch(
const std::string & content,
const CanonPath & pointerFilePath,
StringSink & sink,
std::function<void(uint64_t)> sizeCallback) const;
std::vector<nlohmann::json> fetchUrls(const std::vector<Pointer> & pointers) const;
};
} // namespace nix::lfs

View file

@ -1,4 +1,5 @@
#include "git-utils.hh"
#include "git-lfs-fetch.hh"
#include "cache.hh"
#include "finally.hh"
#include "processes.hh"
@ -60,14 +61,6 @@ namespace nix {
struct GitSourceAccessor;
// Some wrapper types that ensure that the git_*_free functions get called.
template<auto del>
struct Deleter
{
template <typename T>
void operator()(T * p) const { del(p); };
};
typedef std::unique_ptr<git_repository, Deleter<git_repository_free>> Repository;
typedef std::unique_ptr<git_tree_entry, Deleter<git_tree_entry_free>> TreeEntry;
typedef std::unique_ptr<git_tree, Deleter<git_tree_free>> Tree;
@ -85,20 +78,6 @@ typedef std::unique_ptr<git_odb, Deleter<git_odb_free>> ObjectDb;
typedef std::unique_ptr<git_packbuilder, Deleter<git_packbuilder_free>> PackBuilder;
typedef std::unique_ptr<git_indexer, Deleter<git_indexer_free>> Indexer;
// A helper to ensure that we don't leak objects returned by libgit2.
template<typename T>
struct Setter
{
T & t;
typename T::pointer p = nullptr;
Setter(T & t) : t(t) { }
~Setter() { if (p) t = T(p); }
operator typename T::pointer * () { return &p; }
};
Hash toHash(const git_oid & oid)
{
#ifdef GIT_EXPERIMENTAL_SHA256
@ -506,12 +485,15 @@ struct GitRepoImpl : GitRepo, std::enable_shared_from_this<GitRepoImpl>
/**
* A 'GitSourceAccessor' with no regard for export-ignore or any other transformations.
*/
ref<GitSourceAccessor> getRawAccessor(const Hash & rev);
ref<GitSourceAccessor> getRawAccessor(
const Hash & rev,
bool smudgeLfs = false);
ref<SourceAccessor> getAccessor(
const Hash & rev,
bool exportIgnore,
std::string displayPrefix) override;
std::string displayPrefix,
bool smudgeLfs = false) override;
ref<SourceAccessor> getAccessor(const WorkdirInfo & wd, bool exportIgnore, MakeNotAllowedError e) override;
@ -670,24 +652,40 @@ ref<GitRepo> GitRepo::openRepo(const std::filesystem::path & path, bool create,
/**
* Raw git tree input accessor.
*/
struct GitSourceAccessor : SourceAccessor
{
ref<GitRepoImpl> repo;
Object root;
std::optional<lfs::Fetch> lfsFetch = std::nullopt;
GitSourceAccessor(ref<GitRepoImpl> repo_, const Hash & rev)
GitSourceAccessor(ref<GitRepoImpl> repo_, const Hash & rev, bool smudgeLfs)
: repo(repo_)
, root(peelToTreeOrBlob(lookupObject(*repo, hashToOID(rev)).get()))
{
if (smudgeLfs)
lfsFetch = std::make_optional(lfs::Fetch(*repo, hashToOID(rev)));
}
std::string readBlob(const CanonPath & path, bool symlink)
{
auto blob = getBlob(path, symlink);
const auto blob = getBlob(path, symlink);
auto data = std::string_view((const char *) git_blob_rawcontent(blob.get()), git_blob_rawsize(blob.get()));
if (lfsFetch) {
if (lfsFetch->shouldFetch(path)) {
StringSink s;
try {
auto contents = std::string((const char *) git_blob_rawcontent(blob.get()), git_blob_rawsize(blob.get()));
lfsFetch->fetch(contents, path, s, [&s](uint64_t size){ s.s.reserve(size); });
} catch (Error & e) {
e.addTrace({}, "while smudging git-lfs file '%s'", path);
throw;
}
return s.s;
}
}
return std::string(data);
return std::string((const char *) git_blob_rawcontent(blob.get()), git_blob_rawsize(blob.get()));
}
std::string readFile(const CanonPath & path) override
@ -1191,19 +1189,22 @@ struct GitFileSystemObjectSinkImpl : GitFileSystemObjectSink
}
};
ref<GitSourceAccessor> GitRepoImpl::getRawAccessor(const Hash & rev)
ref<GitSourceAccessor> GitRepoImpl::getRawAccessor(
const Hash & rev,
bool smudgeLfs)
{
auto self = ref<GitRepoImpl>(shared_from_this());
return make_ref<GitSourceAccessor>(self, rev);
return make_ref<GitSourceAccessor>(self, rev, smudgeLfs);
}
ref<SourceAccessor> GitRepoImpl::getAccessor(
const Hash & rev,
bool exportIgnore,
std::string displayPrefix)
std::string displayPrefix,
bool smudgeLfs)
{
auto self = ref<GitRepoImpl>(shared_from_this());
ref<GitSourceAccessor> rawGitAccessor = getRawAccessor(rev);
ref<GitSourceAccessor> rawGitAccessor = getRawAccessor(rev, smudgeLfs);
rawGitAccessor->setPathDisplay(std::move(displayPrefix));
if (exportIgnore)
return make_ref<GitExportIgnoreSourceAccessor>(self, rawGitAccessor, rev);

View file

@ -89,7 +89,8 @@ struct GitRepo
virtual ref<SourceAccessor> getAccessor(
const Hash & rev,
bool exportIgnore,
std::string displayPrefix) = 0;
std::string displayPrefix,
bool smudgeLfs = false) = 0;
virtual ref<SourceAccessor> getAccessor(const WorkdirInfo & wd, bool exportIgnore, MakeNotAllowedError makeNotAllowedError) = 0;
@ -126,4 +127,26 @@ struct GitRepo
ref<GitRepo> getTarballCache();
// A helper to ensure that the `git_*_free` functions get called.
template<auto del>
struct Deleter
{
template <typename T>
void operator()(T * p) const { del(p); };
};
// A helper to ensure that we don't leak objects returned by libgit2.
template<typename T>
struct Setter
{
T & t;
typename T::pointer p = nullptr;
Setter(T & t) : t(t) { }
~Setter() { if (p) t = T(p); }
operator typename T::pointer * () { return &p; }
};
}

View file

@ -9,7 +9,6 @@
#include "pathlocks.hh"
#include "processes.hh"
#include "git.hh"
#include "mounted-source-accessor.hh"
#include "git-utils.hh"
#include "logging.hh"
#include "finally.hh"
@ -185,7 +184,7 @@ struct GitInputScheme : InputScheme
for (auto & [name, value] : url.query) {
if (name == "rev" || name == "ref" || name == "keytype" || name == "publicKey" || name == "publicKeys")
attrs.emplace(name, value);
else if (name == "shallow" || name == "submodules" || name == "exportIgnore" || name == "allRefs" || name == "verifyCommit")
else if (name == "shallow" || name == "submodules" || name == "lfs" || name == "exportIgnore" || name == "allRefs" || name == "verifyCommit")
attrs.emplace(name, Explicit<bool> { value == "1" });
else
url2.query.emplace(name, value);
@ -210,6 +209,7 @@ struct GitInputScheme : InputScheme
"rev",
"shallow",
"submodules",
"lfs",
"exportIgnore",
"lastModified",
"revCount",
@ -262,6 +262,8 @@ struct GitInputScheme : InputScheme
if (auto ref = input.getRef()) url.query.insert_or_assign("ref", *ref);
if (getShallowAttr(input))
url.query.insert_or_assign("shallow", "1");
if (getLfsAttr(input))
url.query.insert_or_assign("lfs", "1");
if (getSubmodulesAttr(input))
url.query.insert_or_assign("submodules", "1");
if (maybeGetBoolAttr(input.attrs, "exportIgnore").value_or(false))
@ -411,6 +413,11 @@ struct GitInputScheme : InputScheme
return maybeGetBoolAttr(input.attrs, "submodules").value_or(false);
}
bool getLfsAttr(const Input & input) const
{
return maybeGetBoolAttr(input.attrs, "lfs").value_or(false);
}
bool getExportIgnoreAttr(const Input & input) const
{
return maybeGetBoolAttr(input.attrs, "exportIgnore").value_or(false);
@ -678,7 +685,8 @@ struct GitInputScheme : InputScheme
verifyCommit(input, repo);
bool exportIgnore = getExportIgnoreAttr(input);
auto accessor = repo->getAccessor(rev, exportIgnore, "«" + input.to_string() + "»");
bool smudgeLfs = getLfsAttr(input);
auto accessor = repo->getAccessor(rev, exportIgnore, "«" + input.to_string() + "»", smudgeLfs);
/* If the repo has submodules, fetch them and return a mounted
input accessor consisting of the accessor for the top-level
@ -698,6 +706,7 @@ struct GitInputScheme : InputScheme
attrs.insert_or_assign("rev", submoduleRev.gitRev());
attrs.insert_or_assign("exportIgnore", Explicit<bool>{ exportIgnore });
attrs.insert_or_assign("submodules", Explicit<bool>{ true });
attrs.insert_or_assign("lfs", Explicit<bool>{ smudgeLfs });
attrs.insert_or_assign("allRefs", Explicit<bool>{ true });
auto submoduleInput = fetchers::Input::fromAttrs(*input.settings, std::move(attrs));
auto [submoduleAccessor, submoduleInput2] =
@ -838,7 +847,7 @@ struct GitInputScheme : InputScheme
{
auto makeFingerprint = [&](const Hash & rev)
{
return rev.gitRev() + (getSubmodulesAttr(input) ? ";s" : "") + (getExportIgnoreAttr(input) ? ";e" : "");
return rev.gitRev() + (getSubmodulesAttr(input) ? ";s" : "") + (getExportIgnoreAttr(input) ? ";e" : "") + (getLfsAttr(input) ? ";l" : "");
};
if (auto rev = input.getRev())
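Since `lfs` is accepted as a boolean URL query parameter above and emitted back when the URL is reconstructed, a flake input can request LFS directly in its URL; a hypothetical example (the input name and repository URL are placeholders):
```nix
# Equivalent to setting the boolean attribute `lfs = true` on the input.
inputs.bigAssets.url = "git+https://example.org/some/repo.git?lfs=1";
```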

View file

@ -14,7 +14,7 @@ cxx = meson.get_compiler('cpp')
subdir('nix-meson-build-support/deps-lists')
configdata = configuration_data()
configuration_data()
deps_private_maybe_subproject = [
]
@ -48,12 +48,12 @@ sources = files(
'fetch-to-store.cc',
'fetchers.cc',
'filtering-source-accessor.cc',
'git-lfs-fetch.cc',
'git-utils.cc',
'git.cc',
'github.cc',
'indirect.cc',
'mercurial.cc',
'mounted-source-accessor.cc',
'path.cc',
'registry.cc',
'store-path-accessor.cc',
@ -69,8 +69,8 @@ headers = files(
'fetch-to-store.hh',
'fetchers.hh',
'filtering-source-accessor.hh',
'git-lfs-fetch.hh',
'git-utils.hh',
'mounted-source-accessor.hh',
'registry.hh',
'store-path-accessor.hh',
'tarball.hh',

View file

@ -1,9 +0,0 @@
#pragma once
#include "source-accessor.hh"
namespace nix {
ref<SourceAccessor> makeMountedSourceAccessor(std::map<CanonPath, ref<SourceAccessor>> mounts);
}

View file

@ -41,14 +41,6 @@ mkMesonLibrary (finalAttrs: {
nlohmann_json
];
preConfigure =
# "Inline" .version so it's not a symlink, and includes the suffix.
# Do the meson utils, without modification.
''
chmod u+w ./.version
echo ${version} > ../../.version
'';
meta = {
platforms = lib.platforms.unix ++ lib.platforms.windows;
};

View file

@ -38,14 +38,6 @@ mkMesonLibrary (finalAttrs: {
nix-flake
];
preConfigure =
# "Inline" .version so it's not a symlink, and includes the suffix.
# Do the meson utils, without modification.
''
chmod u+w ./.version
echo ${version} > ../../.version
'';
mesonFlags = [
];

View file

@ -46,14 +46,6 @@ mkMesonExecutable (finalAttrs: {
gtest
];
preConfigure =
# "Inline" .version so it's not a symlink, and includes the suffix.
# Do the meson utils, without modification.
''
chmod u+w ./.version
echo ${version} > ../../.version
'';
mesonFlags = [
];

View file

@ -337,7 +337,7 @@ static Flake readFlake(
auto storePath = fetchToStore(*state.store, setting.value->path(), FetchMode::Copy);
flake.config.settings.emplace(
state.symbols[setting.name],
state.store->toRealPath(storePath));
state.store->printStorePath(storePath));
}
else if (setting.value->type() == nInt)
flake.config.settings.emplace(
@ -381,7 +381,7 @@ static FlakeRef applySelfAttrs(
{
auto newRef(ref);
std::set<std::string> allowedAttrs{"submodules"};
std::set<std::string> allowedAttrs{"submodules", "lfs"};
for (auto & attr : flake.selfAttrs) {
if (!allowedAttrs.contains(attr.first))
@ -423,7 +423,7 @@ static Flake getFlake(
auto storePath = copyInputToStore(state, lockedRef.input, originalRef.input, accessor);
// Re-parse flake.nix from the store.
return readFlake(state, originalRef, resolvedRef, lockedRef, state.rootPath(state.store->toRealPath(storePath)), lockRootAttrPath);
return readFlake(state, originalRef, resolvedRef, lockedRef, state.storePath(storePath), lockRootAttrPath);
}
Flake getFlake(EvalState & state, const FlakeRef & originalRef, bool useRegistries)
@ -784,7 +784,7 @@ LockedFlake lockFlake(
// FIXME: allow input to be lazy.
auto storePath = copyInputToStore(state, lockedRef.input, input.ref->input, accessor);
return {state.rootPath(state.store->toRealPath(storePath)), lockedRef};
return {state.storePath(storePath), lockedRef};
}
}();
@ -921,21 +921,6 @@ LockedFlake lockFlake(
}
}
std::pair<StorePath, Path> sourcePathToStorePath(
ref<Store> store,
const SourcePath & _path)
{
auto path = _path.path.abs();
if (auto store2 = store.dynamic_pointer_cast<LocalFSStore>()) {
auto realStoreDir = store2->getRealStoreDir();
if (isInDir(path, realStoreDir))
path = store2->storeDir + path.substr(realStoreDir.size());
}
return store->toStorePath(path);
}
void callFlake(EvalState & state,
const LockedFlake & lockedFlake,
Value & vRes)
@ -953,7 +938,7 @@ void callFlake(EvalState & state,
auto lockedNode = node.dynamic_pointer_cast<const LockedNode>();
auto [storePath, subdir] = sourcePathToStorePath(state.store, sourcePath);
auto [storePath, subdir] = state.store->toStorePath(sourcePath.path.abs());
emitTreeAttrs(
state,

View file

@ -234,16 +234,6 @@ void callFlake(
const LockedFlake & lockedFlake,
Value & v);
/**
* Map a `SourcePath` to the corresponding store path. This is a
* temporary hack to support chroot stores while we don't have full
* lazy trees. FIXME: Remove this once we can pass a sourcePath rather
* than a storePath to call-flake.nix.
*/
std::pair<StorePath, Path> sourcePathToStorePath(
ref<Store> store,
const SourcePath & path);
}
void emitTreeAttrs(

View file

@ -40,14 +40,6 @@ mkMesonLibrary (finalAttrs: {
nlohmann_json
];
preConfigure =
# "Inline" .version so it's not a symlink, and includes the suffix.
# Do the meson utils, without modification.
''
chmod u+w ./.version
echo ${version} > ../../.version
'';
meta = {
platforms = lib.platforms.unix ++ lib.platforms.windows;
};

View file

@ -40,14 +40,6 @@ mkMesonLibrary (finalAttrs: {
nix-main
];
preConfigure =
# "Inline" .version so it's not a symlink, and includes the suffix.
# Do the meson utils, without modification.
''
chmod u+w ./.version
echo ${version} > ../../.version
'';
mesonFlags = [
];

View file

@ -6,7 +6,8 @@ namespace nix {
LogFormat defaultLogFormat = LogFormat::raw;
LogFormat parseLogFormat(const std::string & logFormatStr) {
LogFormat parseLogFormat(const std::string & logFormatStr)
{
if (logFormatStr == "raw" || getEnv("NIX_GET_COMPLETIONS"))
return LogFormat::raw;
else if (logFormatStr == "raw-with-logs")
@ -20,14 +21,15 @@ LogFormat parseLogFormat(const std::string & logFormatStr) {
throw Error("option 'log-format' has an invalid value '%s'", logFormatStr);
}
Logger * makeDefaultLogger() {
std::unique_ptr<Logger> makeDefaultLogger()
{
switch (defaultLogFormat) {
case LogFormat::raw:
return makeSimpleLogger(false);
case LogFormat::rawWithLogs:
return makeSimpleLogger(true);
case LogFormat::internalJSON:
return makeJSONLogger(*makeSimpleLogger(true));
return makeJSONLogger(getStandardError());
case LogFormat::bar:
return makeProgressBar();
case LogFormat::barWithLogs: {
@ -40,16 +42,19 @@ Logger * makeDefaultLogger() {
}
}
void setLogFormat(const std::string & logFormatStr) {
void setLogFormat(const std::string & logFormatStr)
{
setLogFormat(parseLogFormat(logFormatStr));
}
void setLogFormat(const LogFormat & logFormat) {
void setLogFormat(const LogFormat & logFormat)
{
defaultLogFormat = logFormat;
createDefaultLogger();
}
void createDefaultLogger() {
void createDefaultLogger()
{
logger = makeDefaultLogger();
}

View file

@ -37,14 +37,6 @@ mkMesonLibrary (finalAttrs: {
openssl
];
preConfigure =
# "Inline" .version so it's not a symlink, and includes the suffix.
# Do the meson utils, without modification.
''
chmod u+w ./.version
echo ${version} > ../../.version
'';
meta = {
platforms = lib.platforms.unix ++ lib.platforms.windows;
};

View file

@ -117,13 +117,15 @@ public:
{
{
auto state(state_.lock());
if (!state->active) return;
state->active = false;
writeToStderr("\r\e[K");
updateCV.notify_one();
quitCV.notify_one();
if (state->active) {
state->active = false;
writeToStderr("\r\e[K");
updateCV.notify_one();
quitCV.notify_one();
}
}
updateThread.join();
if (updateThread.joinable())
updateThread.join();
}
void pause() override {
@ -553,9 +555,9 @@ public:
}
};
Logger * makeProgressBar()
std::unique_ptr<Logger> makeProgressBar()
{
return new ProgressBar(isTTY());
return std::make_unique<ProgressBar>(isTTY());
}
void startProgressBar()
@ -565,9 +567,8 @@ void startProgressBar()
void stopProgressBar()
{
auto progressBar = dynamic_cast<ProgressBar *>(logger);
if (progressBar) progressBar->stop();
if (auto progressBar = dynamic_cast<ProgressBar *>(logger.get()))
progressBar->stop();
}
}

View file

@ -5,7 +5,7 @@
namespace nix {
Logger * makeProgressBar();
std::unique_ptr<Logger> makeProgressBar();
void startProgressBar();

View file

@ -315,20 +315,6 @@ void printVersion(const std::string & programName)
throw Exit();
}
void showManPage(const std::string & name)
{
restoreProcessContext();
setEnv("MANPATH", settings.nixManDir.c_str());
execlp("man", "man", name.c_str(), nullptr);
if (errno == ENOENT) {
// Not SysError because we don't want to suffix the errno, aka No such file or directory.
throw Error("The '%1%' command was not found, but it is needed for '%2%' and some other '%3%' commands' help text. Perhaps you could install the '%1%' command?", "man", name.c_str(), "nix-*");
}
throw SysError("command 'man %1%' failed", name.c_str());
}
int handleExceptions(const std::string & programName, std::function<void()> fun)
{
ReceiveInterrupts receiveInterrupts; // FIXME: need better place for this

View file

@ -70,11 +70,6 @@ struct LegacyArgs : public MixCommonArgs, public RootArgs
};
/**
* Show the manual page for the specified program.
*/
void showManPage(const std::string & name);
/**
* The constructor of this class starts a pager if standard output is a
* terminal and $PAGER is set. Standard output is redirected to the

View file

@ -36,14 +36,6 @@ mkMesonLibrary (finalAttrs: {
nix-store
];
preConfigure =
# "Inline" .version so it's not a symlink, and includes the suffix.
# Do the meson utils, without modification.
''
chmod u+w ./.version
echo ${version} > ../../.version
'';
mesonFlags = [
];

View file

@ -40,14 +40,6 @@ mkMesonLibrary (finalAttrs: {
rapidcheck
];
preConfigure =
# "Inline" .version so it's not a symlink, and includes the suffix.
# Do the meson utils, without modification.
''
chmod u+w ./.version
echo ${version} > ../../.version
'';
mesonFlags = [
];

View file

@ -3,13 +3,15 @@
#include "experimental-features.hh"
#include "derivations.hh"
#include "tests/libstore.hh"
#include "tests/characterization.hh"
#include "derivations.hh"
#include "derivation-options.hh"
#include "parsed-derivations.hh"
#include "types.hh"
#include "json-utils.hh"
#include "tests/libstore.hh"
#include "tests/characterization.hh"
namespace nix {
using nlohmann::json;
@ -80,21 +82,30 @@ TEST_F(DerivationAdvancedAttrsTest, Derivation_advancedAttributes_defaults)
auto drvPath = writeDerivation(*store, got, NoRepair, true);
ParsedDerivation parsedDrv(drvPath, got);
DerivationOptions options = DerivationOptions::fromParsedDerivation(parsedDrv);
EXPECT_EQ(parsedDrv.getStringAttr("__sandboxProfile").value_or(""), "");
EXPECT_EQ(parsedDrv.getBoolAttr("__noChroot"), false);
EXPECT_EQ(parsedDrv.getStringsAttr("__impureHostDeps").value_or(Strings()), Strings());
EXPECT_EQ(parsedDrv.getStringsAttr("impureEnvVars").value_or(Strings()), Strings());
EXPECT_EQ(parsedDrv.getBoolAttr("__darwinAllowLocalNetworking"), false);
EXPECT_EQ(parsedDrv.getStringsAttr("allowedReferences"), std::nullopt);
EXPECT_EQ(parsedDrv.getStringsAttr("allowedRequisites"), std::nullopt);
EXPECT_EQ(parsedDrv.getStringsAttr("disallowedReferences"), std::nullopt);
EXPECT_EQ(parsedDrv.getStringsAttr("disallowedRequisites"), std::nullopt);
EXPECT_EQ(parsedDrv.getRequiredSystemFeatures(), StringSet());
EXPECT_EQ(parsedDrv.canBuildLocally(*store), false);
EXPECT_EQ(parsedDrv.willBuildLocally(*store), false);
EXPECT_EQ(parsedDrv.substitutesAllowed(), true);
EXPECT_EQ(parsedDrv.useUidRange(), false);
EXPECT_TRUE(!parsedDrv.hasStructuredAttrs());
EXPECT_EQ(options.additionalSandboxProfile, "");
EXPECT_EQ(options.noChroot, false);
EXPECT_EQ(options.impureHostDeps, StringSet{});
EXPECT_EQ(options.impureEnvVars, StringSet{});
EXPECT_EQ(options.allowLocalNetworking, false);
{
auto * checksForAllOutputs_ = std::get_if<0>(&options.outputChecks);
ASSERT_TRUE(checksForAllOutputs_ != nullptr);
auto & checksForAllOutputs = *checksForAllOutputs_;
EXPECT_EQ(checksForAllOutputs.allowedReferences, std::nullopt);
EXPECT_EQ(checksForAllOutputs.allowedRequisites, std::nullopt);
EXPECT_EQ(checksForAllOutputs.disallowedReferences, StringSet{});
EXPECT_EQ(checksForAllOutputs.disallowedRequisites, StringSet{});
}
EXPECT_EQ(options.getRequiredSystemFeatures(got), StringSet());
EXPECT_EQ(options.canBuildLocally(*store, got), false);
EXPECT_EQ(options.willBuildLocally(*store, got), false);
EXPECT_EQ(options.substitutesAllowed(), true);
EXPECT_EQ(options.useUidRange(got), false);
});
};
@ -106,29 +117,36 @@ TEST_F(DerivationAdvancedAttrsTest, Derivation_advancedAttributes)
auto drvPath = writeDerivation(*store, got, NoRepair, true);
ParsedDerivation parsedDrv(drvPath, got);
DerivationOptions options = DerivationOptions::fromParsedDerivation(parsedDrv);
StringSet systemFeatures{"rainbow", "uid-range"};
EXPECT_EQ(parsedDrv.getStringAttr("__sandboxProfile").value_or(""), "sandcastle");
EXPECT_EQ(parsedDrv.getBoolAttr("__noChroot"), true);
EXPECT_EQ(parsedDrv.getStringsAttr("__impureHostDeps").value_or(Strings()), Strings{"/usr/bin/ditto"});
EXPECT_EQ(parsedDrv.getStringsAttr("impureEnvVars").value_or(Strings()), Strings{"UNICORN"});
EXPECT_EQ(parsedDrv.getBoolAttr("__darwinAllowLocalNetworking"), true);
EXPECT_EQ(
parsedDrv.getStringsAttr("allowedReferences"), Strings{"/nix/store/3c08bzb71z4wiag719ipjxr277653ynp-foo"});
EXPECT_EQ(
parsedDrv.getStringsAttr("allowedRequisites"), Strings{"/nix/store/3c08bzb71z4wiag719ipjxr277653ynp-foo"});
EXPECT_EQ(
parsedDrv.getStringsAttr("disallowedReferences"),
Strings{"/nix/store/7rhsm8i393hm1wcsmph782awg1hi2f7x-bar"});
EXPECT_EQ(
parsedDrv.getStringsAttr("disallowedRequisites"),
Strings{"/nix/store/7rhsm8i393hm1wcsmph782awg1hi2f7x-bar"});
EXPECT_EQ(parsedDrv.getRequiredSystemFeatures(), systemFeatures);
EXPECT_EQ(parsedDrv.canBuildLocally(*store), false);
EXPECT_EQ(parsedDrv.willBuildLocally(*store), false);
EXPECT_EQ(parsedDrv.substitutesAllowed(), false);
EXPECT_EQ(parsedDrv.useUidRange(), true);
EXPECT_TRUE(!parsedDrv.hasStructuredAttrs());
EXPECT_EQ(options.additionalSandboxProfile, "sandcastle");
EXPECT_EQ(options.noChroot, true);
EXPECT_EQ(options.impureHostDeps, StringSet{"/usr/bin/ditto"});
EXPECT_EQ(options.impureEnvVars, StringSet{"UNICORN"});
EXPECT_EQ(options.allowLocalNetworking, true);
{
auto * checksForAllOutputs_ = std::get_if<0>(&options.outputChecks);
ASSERT_TRUE(checksForAllOutputs_ != nullptr);
auto & checksForAllOutputs = *checksForAllOutputs_;
EXPECT_EQ(
checksForAllOutputs.allowedReferences, StringSet{"/nix/store/3c08bzb71z4wiag719ipjxr277653ynp-foo"});
EXPECT_EQ(
checksForAllOutputs.allowedRequisites, StringSet{"/nix/store/3c08bzb71z4wiag719ipjxr277653ynp-foo"});
EXPECT_EQ(
checksForAllOutputs.disallowedReferences, StringSet{"/nix/store/7rhsm8i393hm1wcsmph782awg1hi2f7x-bar"});
EXPECT_EQ(
checksForAllOutputs.disallowedRequisites, StringSet{"/nix/store/7rhsm8i393hm1wcsmph782awg1hi2f7x-bar"});
}
EXPECT_EQ(options.getRequiredSystemFeatures(got), systemFeatures);
EXPECT_EQ(options.canBuildLocally(*store, got), false);
EXPECT_EQ(options.willBuildLocally(*store, got), false);
EXPECT_EQ(options.substitutesAllowed(), false);
EXPECT_EQ(options.useUidRange(got), true);
});
};
@ -140,27 +158,29 @@ TEST_F(DerivationAdvancedAttrsTest, Derivation_advancedAttributes_structuredAttr
auto drvPath = writeDerivation(*store, got, NoRepair, true);
ParsedDerivation parsedDrv(drvPath, got);
DerivationOptions options = DerivationOptions::fromParsedDerivation(parsedDrv);
EXPECT_EQ(parsedDrv.getStringAttr("__sandboxProfile").value_or(""), "");
EXPECT_EQ(parsedDrv.getBoolAttr("__noChroot"), false);
EXPECT_EQ(parsedDrv.getStringsAttr("__impureHostDeps").value_or(Strings()), Strings());
EXPECT_EQ(parsedDrv.getStringsAttr("impureEnvVars").value_or(Strings()), Strings());
EXPECT_EQ(parsedDrv.getBoolAttr("__darwinAllowLocalNetworking"), false);
EXPECT_TRUE(parsedDrv.hasStructuredAttrs());
EXPECT_EQ(options.additionalSandboxProfile, "");
EXPECT_EQ(options.noChroot, false);
EXPECT_EQ(options.impureHostDeps, StringSet{});
EXPECT_EQ(options.impureEnvVars, StringSet{});
EXPECT_EQ(options.allowLocalNetworking, false);
{
auto structuredAttrs_ = parsedDrv.getStructuredAttrs();
ASSERT_TRUE(structuredAttrs_);
auto & structuredAttrs = *structuredAttrs_;
auto * checksPerOutput_ = std::get_if<1>(&options.outputChecks);
ASSERT_TRUE(checksPerOutput_ != nullptr);
auto & checksPerOutput = *checksPerOutput_;
auto outputChecks_ = get(structuredAttrs, "outputChecks");
ASSERT_FALSE(outputChecks_);
EXPECT_EQ(checksPerOutput.size(), 0);
}
EXPECT_EQ(parsedDrv.getRequiredSystemFeatures(), StringSet());
EXPECT_EQ(parsedDrv.canBuildLocally(*store), false);
EXPECT_EQ(parsedDrv.willBuildLocally(*store), false);
EXPECT_EQ(parsedDrv.substitutesAllowed(), true);
EXPECT_EQ(parsedDrv.useUidRange(), false);
EXPECT_EQ(options.getRequiredSystemFeatures(got), StringSet());
EXPECT_EQ(options.canBuildLocally(*store, got), false);
EXPECT_EQ(options.willBuildLocally(*store, got), false);
EXPECT_EQ(options.substitutesAllowed(), true);
EXPECT_EQ(options.useUidRange(got), false);
});
};
@ -172,62 +192,52 @@ TEST_F(DerivationAdvancedAttrsTest, Derivation_advancedAttributes_structuredAttr
auto drvPath = writeDerivation(*store, got, NoRepair, true);
ParsedDerivation parsedDrv(drvPath, got);
DerivationOptions options = DerivationOptions::fromParsedDerivation(parsedDrv);
StringSet systemFeatures{"rainbow", "uid-range"};
EXPECT_EQ(parsedDrv.getStringAttr("__sandboxProfile").value_or(""), "sandcastle");
EXPECT_EQ(parsedDrv.getBoolAttr("__noChroot"), true);
EXPECT_EQ(parsedDrv.getStringsAttr("__impureHostDeps").value_or(Strings()), Strings{"/usr/bin/ditto"});
EXPECT_EQ(parsedDrv.getStringsAttr("impureEnvVars").value_or(Strings()), Strings{"UNICORN"});
EXPECT_EQ(parsedDrv.getBoolAttr("__darwinAllowLocalNetworking"), true);
EXPECT_TRUE(parsedDrv.hasStructuredAttrs());
EXPECT_EQ(options.additionalSandboxProfile, "sandcastle");
EXPECT_EQ(options.noChroot, true);
EXPECT_EQ(options.impureHostDeps, StringSet{"/usr/bin/ditto"});
EXPECT_EQ(options.impureEnvVars, StringSet{"UNICORN"});
EXPECT_EQ(options.allowLocalNetworking, true);
{
auto structuredAttrs_ = parsedDrv.getStructuredAttrs();
ASSERT_TRUE(structuredAttrs_);
auto & structuredAttrs = *structuredAttrs_;
auto outputChecks_ = get(structuredAttrs, "outputChecks");
ASSERT_TRUE(outputChecks_);
auto & outputChecks = *outputChecks_;
{
auto output_ = get(outputChecks, "out");
auto output_ = get(std::get<1>(options.outputChecks), "out");
ASSERT_TRUE(output_);
auto & output = *output_;
EXPECT_EQ(
get(output, "allowedReferences")->get<Strings>(),
Strings{"/nix/store/3c08bzb71z4wiag719ipjxr277653ynp-foo"});
EXPECT_EQ(
get(output, "allowedRequisites")->get<Strings>(),
Strings{"/nix/store/3c08bzb71z4wiag719ipjxr277653ynp-foo"});
EXPECT_EQ(output.allowedReferences, StringSet{"/nix/store/3c08bzb71z4wiag719ipjxr277653ynp-foo"});
EXPECT_EQ(output.allowedRequisites, StringSet{"/nix/store/3c08bzb71z4wiag719ipjxr277653ynp-foo"});
}
{
auto output_ = get(outputChecks, "bin");
auto output_ = get(std::get<1>(options.outputChecks), "bin");
ASSERT_TRUE(output_);
auto & output = *output_;
EXPECT_EQ(
get(output, "disallowedReferences")->get<Strings>(),
Strings{"/nix/store/7rhsm8i393hm1wcsmph782awg1hi2f7x-bar"});
EXPECT_EQ(
get(output, "disallowedRequisites")->get<Strings>(),
Strings{"/nix/store/7rhsm8i393hm1wcsmph782awg1hi2f7x-bar"});
EXPECT_EQ(output.disallowedReferences, StringSet{"/nix/store/7rhsm8i393hm1wcsmph782awg1hi2f7x-bar"});
EXPECT_EQ(output.disallowedRequisites, StringSet{"/nix/store/7rhsm8i393hm1wcsmph782awg1hi2f7x-bar"});
}
{
auto output_ = get(outputChecks, "dev");
auto output_ = get(std::get<1>(options.outputChecks), "dev");
ASSERT_TRUE(output_);
auto & output = *output_;
EXPECT_EQ(get(output, "maxSize")->get<uint64_t>(), 789);
EXPECT_EQ(get(output, "maxClosureSize")->get<uint64_t>(), 5909);
EXPECT_EQ(output.maxSize, 789);
EXPECT_EQ(output.maxClosureSize, 5909);
}
}
EXPECT_EQ(parsedDrv.getRequiredSystemFeatures(), systemFeatures);
EXPECT_EQ(parsedDrv.canBuildLocally(*store), false);
EXPECT_EQ(parsedDrv.willBuildLocally(*store), false);
EXPECT_EQ(parsedDrv.substitutesAllowed(), false);
EXPECT_EQ(parsedDrv.useUidRange(), true);
EXPECT_EQ(options.getRequiredSystemFeatures(got), systemFeatures);
EXPECT_EQ(options.canBuildLocally(*store, got), false);
EXPECT_EQ(options.willBuildLocally(*store, got), false);
EXPECT_EQ(options.substitutesAllowed(), false);
EXPECT_EQ(options.useUidRange(got), true);
});
};

View file

@ -52,14 +52,6 @@ mkMesonExecutable (finalAttrs: {
nix-store-test-support
];
preConfigure =
# "Inline" .version so it's not a symlink, and includes the suffix.
# Do the meson utils, without modification.
''
chmod u+w ./.version
echo ${version} > ../../.version
'';
mesonFlags = [
];

View file

@ -0,0 +1,126 @@
#include "derivation-creation-and-realisation-goal.hh"
#include "worker.hh"
namespace nix {
DerivationCreationAndRealisationGoal::DerivationCreationAndRealisationGoal(
ref<SingleDerivedPath> drvReq, const OutputsSpec & wantedOutputs, Worker & worker, BuildMode buildMode)
: Goal(worker, DerivedPath::Built{.drvPath = drvReq, .outputs = wantedOutputs})
, drvReq(drvReq)
, wantedOutputs(wantedOutputs)
, buildMode(buildMode)
{
name =
fmt("outer obtaining drv from '%s' and then building outputs %s",
drvReq->to_string(worker.store),
std::visit(
overloaded{
[&](const OutputsSpec::All) -> std::string { return "* (all of them)"; },
[&](const OutputsSpec::Names os) { return concatStringsSep(", ", quoteStrings(os)); },
},
wantedOutputs.raw));
trace("created outer");
worker.updateProgress();
}
DerivationCreationAndRealisationGoal::~DerivationCreationAndRealisationGoal() {}
static StorePath pathPartOfReq(const SingleDerivedPath & req)
{
return std::visit(
overloaded{
[&](const SingleDerivedPath::Opaque & bo) { return bo.path; },
[&](const SingleDerivedPath::Built & bfd) { return pathPartOfReq(*bfd.drvPath); },
},
req.raw());
}
std::string DerivationCreationAndRealisationGoal::key()
{
/* Ensure that derivations get built in order of their name,
i.e. a derivation named "aardvark" always comes before "baboon". And
substitution goals and inner derivation goals always happen before
derivation goals (due to "b$"). */
return "c$" + std::string(pathPartOfReq(*drvReq).name()) + "$" + drvReq->to_string(worker.store);
}
void DerivationCreationAndRealisationGoal::timedOut(Error && ex) {}
void DerivationCreationAndRealisationGoal::addWantedOutputs(const OutputsSpec & outputs)
{
/* If we already want all outputs, there is nothing to do. */
auto newWanted = wantedOutputs.union_(outputs);
bool needRestart = !newWanted.isSubsetOf(wantedOutputs);
wantedOutputs = newWanted;
if (!needRestart)
return;
if (!optDrvPath)
// haven't started steps where the outputs matter yet
return;
worker.makeDerivationGoal(*optDrvPath, outputs, buildMode);
}
Goal::Co DerivationCreationAndRealisationGoal::init()
{
trace("outer init");
/* The first thing to do is to make sure that the derivation
exists. If it doesn't, it may be created through a
substitute. */
if (auto optDrvPath = [this]() -> std::optional<StorePath> {
if (buildMode != bmNormal)
return std::nullopt;
auto drvPath = StorePath::dummy;
try {
drvPath = resolveDerivedPath(worker.store, *drvReq);
} catch (MissingRealisation &) {
return std::nullopt;
}
auto cond = worker.evalStore.isValidPath(drvPath) || worker.store.isValidPath(drvPath);
return cond ? std::optional{drvPath} : std::nullopt;
}()) {
trace(
fmt("already have drv '%s' for '%s', can go straight to building",
worker.store.printStorePath(*optDrvPath),
drvReq->to_string(worker.store)));
} else {
trace("need to obtain drv we want to build");
addWaitee(worker.makeGoal(DerivedPath::fromSingle(*drvReq)));
co_await Suspend{};
}
trace("outer load and build derivation");
if (nrFailed != 0) {
co_return amDone(ecFailed, Error("cannot build missing derivation '%s'", drvReq->to_string(worker.store)));
}
StorePath drvPath = resolveDerivedPath(worker.store, *drvReq);
/* Build this step! */
concreteDrvGoal = worker.makeDerivationGoal(drvPath, wantedOutputs, buildMode);
{
auto g = upcast_goal(concreteDrvGoal);
/* We will finish with it ourselves, as if we were the derivational goal. */
g->preserveException = true;
}
optDrvPath = std::move(drvPath);
addWaitee(upcast_goal(concreteDrvGoal));
co_await Suspend{};
trace("outer build done");
buildResult = upcast_goal(concreteDrvGoal)
->getBuildResult(DerivedPath::Built{
.drvPath = drvReq,
.outputs = wantedOutputs,
});
auto g = upcast_goal(concreteDrvGoal);
co_return amDone(g->exitCode, g->ex);
}
}

View file

@ -0,0 +1,88 @@
#pragma once
#include "parsed-derivations.hh"
#include "store-api.hh"
#include "pathlocks.hh"
#include "goal.hh"
namespace nix {
struct DerivationGoal;
/**
* This goal type is essentially the serial composition (like function
* composition) of a goal for getting a derivation, and then a
* `DerivationGoal` using the newly-obtained derivation.
*
* In the (currently experimental) general inductive case of derivations
* that are themselves build outputs, that first goal will be *another*
* `DerivationCreationAndRealisationGoal`. In the (much more common) base-case
* where the derivation has no provenance and is just referred to by
* (content-addressed) store path, that first goal is a
* `SubstitutionGoal`.
*
* If we already have the derivation (e.g. if the evaluator has created
* the derivation locally and then instructed the store to build it),
* we can skip the first goal entirely as a small optimization.
*/
struct DerivationCreationAndRealisationGoal : public Goal
{
/**
* How to obtain a store path of the derivation to build.
*/
ref<SingleDerivedPath> drvReq;
/**
* The path of the derivation, once obtained.
**/
std::optional<StorePath> optDrvPath;
/**
* The goal for the corresponding concrete derivation.
**/
std::shared_ptr<DerivationGoal> concreteDrvGoal;
/**
* The specific outputs that we need to build.
*/
OutputsSpec wantedOutputs;
/**
* The final output paths of the build.
*
* - For input-addressed derivations, always the precomputed paths
*
* - For content-addressed derivations, calculated from whatever the
* hash ends up being. (Note that fixed outputs derivations that
* produce the "wrong" output still install that data under its
* true content-address.)
*/
OutputPathMap finalOutputs;
BuildMode buildMode;
DerivationCreationAndRealisationGoal(
ref<SingleDerivedPath> drvReq,
const OutputsSpec & wantedOutputs,
Worker & worker,
BuildMode buildMode = bmNormal);
virtual ~DerivationCreationAndRealisationGoal();
void timedOut(Error && ex) override;
std::string key() override;
/**
* Add wanted outputs to an already existing derivation goal.
*/
void addWantedOutputs(const OutputsSpec & outputs);
Co init() override;
JobCategory jobCategory() const override
{
return JobCategory::Administration;
};
};
}
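The doc comment above frames the new goal as a serial composition: first obtain the `.drv`, then realise the wanted outputs. Below is a hedged, caller-side sketch of what that buys. It is not code from this commit; it only uses types that appear in these hunks, the store path and output name are made up, and `store-api.hh` is assumed to pull in the derived-path types.

```cpp
#include "store-api.hh"   // assumed to provide DerivedPath, OutputsSpec, BuildMode

// With the outer goal in place, buildPaths() can be handed a DerivedPath::Built
// even when the .drv file itself still has to be substituted: the outer goal
// obtains the derivation first, then spawns the concrete DerivationGoal.
void buildOneOutput(nix::Store & store, const nix::StorePath & drvPath)
{
    using namespace nix;
    std::vector<DerivedPath> reqs{
        DerivedPath::Built{
            .drvPath = makeConstantStorePathRef(drvPath), // base case: plain store path
            .outputs = OutputsSpec::Names{"out"},
        },
    };
    store.buildPaths(reqs, bmNormal);
}
```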

View file

@ -137,21 +137,8 @@ Goal::Co DerivationGoal::init() {
trace("init");
if (useDerivation) {
/* The first thing to do is to make sure that the derivation
exists. If it doesn't, it may be created through a
substitute. */
if (buildMode != bmNormal || !worker.evalStore.isValidPath(drvPath)) {
addWaitee(upcast_goal(worker.makePathSubstitutionGoal(drvPath)));
co_await Suspend{};
}
trace("loading derivation");
if (nrFailed != 0) {
co_return done(BuildResult::MiscFailure, {}, Error("cannot build missing derivation '%s'", worker.store.printStorePath(drvPath)));
}
/* `drvPath' should already be a root, but let's be on the safe
side: if the user forgot to make it a root, we wouldn't want
things being garbage collected while we're busy. */
@ -181,6 +168,7 @@ Goal::Co DerivationGoal::haveDerivation()
trace("have derivation");
parsedDrv = std::make_unique<ParsedDerivation>(drvPath, *drv);
drvOptions = std::make_unique<DerivationOptions>(DerivationOptions::fromParsedDerivation(*parsedDrv));
if (!drv->type().hasKnownOutputPaths())
experimentalFeatureSettings.require(Xp::CaDerivations);
@ -237,7 +225,7 @@ Goal::Co DerivationGoal::haveDerivation()
/* We are first going to try to create the invalid output paths
through substitutes. If that doesn't work, we'll build
them. */
if (settings.useSubstitutes && parsedDrv->substitutesAllowed())
if (settings.useSubstitutes && drvOptions->substitutesAllowed())
for (auto & [outputName, status] : initialOutputs) {
if (!status.wanted) continue;
if (!status.known)
@ -627,7 +615,7 @@ Goal::Co DerivationGoal::tryToBuild()
`preferLocalBuild' set. Also, check and repair modes are only
supported for local builds. */
bool buildLocally =
(buildMode != bmNormal || parsedDrv->willBuildLocally(worker.store))
(buildMode != bmNormal || drvOptions->willBuildLocally(worker.store, *drv))
&& settings.maxBuildJobs.get() != 0;
if (!buildLocally) {
@ -1123,7 +1111,7 @@ HookReply DerivationGoal::tryBuildHook()
<< (worker.getNrLocalBuilds() < settings.maxBuildJobs ? 1 : 0)
<< drv->platform
<< worker.store.printStorePath(drvPath)
<< parsedDrv->getRequiredSystemFeatures();
<< drvOptions->getRequiredSystemFeatures(*drv);
worker.hook->sink.flush();
/* Read the first line of input, which should be a word indicating
@ -1552,23 +1540,24 @@ void DerivationGoal::waiteeDone(GoalPtr waitee, ExitCode result)
if (!useDerivation || !drv) return;
auto & fullDrv = *dynamic_cast<Derivation *>(drv.get());
auto * dg = dynamic_cast<DerivationGoal *>(&*waitee);
if (!dg) return;
std::optional info = tryGetConcreteDrvGoal(waitee);
if (!info) return;
const auto & [dg, drvReq] = *info;
auto * nodeP = fullDrv.inputDrvs.findSlot(DerivedPath::Opaque { .path = dg->drvPath });
auto * nodeP = fullDrv.inputDrvs.findSlot(drvReq.get());
if (!nodeP) return;
auto & outputs = nodeP->value;
for (auto & outputName : outputs) {
auto buildResult = dg->getBuildResult(DerivedPath::Built {
.drvPath = makeConstantStorePathRef(dg->drvPath),
auto buildResult = dg.get().getBuildResult(DerivedPath::Built {
.drvPath = makeConstantStorePathRef(dg.get().drvPath),
.outputs = OutputsSpec::Names { outputName },
});
if (buildResult.success()) {
auto i = buildResult.builtOutputs.find(outputName);
if (i != buildResult.builtOutputs.end())
inputDrvOutputs.insert_or_assign(
{ dg->drvPath, outputName },
{ dg.get().drvPath, outputName },
i->second.outPath);
}
}

View file

@ -2,6 +2,7 @@
///@file
#include "parsed-derivations.hh"
#include "derivation-options.hh"
#ifndef _WIN32
# include "user-lock.hh"
#endif
@ -56,6 +57,10 @@ struct InitialOutput {
/**
* A goal for building some or all of the outputs of a derivation.
*
* The derivation must already be present, either in the store as a `.drv`
* file or in memory. If the derivation itself needs to be obtained first, a
* `DerivationCreationAndRealisationGoal` must be used instead.
*/
struct DerivationGoal : public Goal
{
@ -143,6 +148,7 @@ struct DerivationGoal : public Goal
std::unique_ptr<Derivation> drv;
std::unique_ptr<ParsedDerivation> parsedDrv;
std::unique_ptr<DerivationOptions> drvOptions;
/**
* The remainder is state held during the build.

View file

@ -1,6 +1,7 @@
#include "worker.hh"
#include "substitution-goal.hh"
#ifndef _WIN32 // TODO Enable building on Windows
# include "derivation-creation-and-realisation-goal.hh"
# include "derivation-goal.hh"
#endif
#include "local-store.hh"
@ -29,8 +30,8 @@ void Store::buildPaths(const std::vector<DerivedPath> & reqs, BuildMode buildMod
}
if (i->exitCode != Goal::ecSuccess) {
#ifndef _WIN32 // TODO Enable building on Windows
if (auto i2 = dynamic_cast<DerivationGoal *>(i.get()))
failed.insert(printStorePath(i2->drvPath));
if (auto i2 = dynamic_cast<DerivationCreationAndRealisationGoal *>(i.get()))
failed.insert(i2->drvReq->to_string(*this));
else
#endif
if (auto i2 = dynamic_cast<PathSubstitutionGoal *>(i.get()))

View file

@ -175,7 +175,7 @@ Goal::Done Goal::amDone(ExitCode result, std::optional<Error> ex)
exitCode = result;
if (ex) {
if (!waiters.empty())
if (!preserveException && !waiters.empty())
logError(ex->info());
else
this->ex = std::move(*ex);

View file

@ -50,6 +50,16 @@ enum struct JobCategory {
* A substitution of an arbitrary store object; it will use network resources.
*/
Substitution,
/**
* A goal that does no "real" work by itself, and just exists to depend on
* other goals which *do* do real work. These goals therefore are not
* limited.
*
* These goals cannot infinitely create themselves, so there is no risk of
* a "fork bomb" type situation (which would be a problem even though the
* goals do no real work) either.
*/
Administration,
};
struct Goal : public std::enable_shared_from_this<Goal>
@ -373,6 +383,17 @@ public:
*/
BuildResult getBuildResult(const DerivedPath &) const;
/**
* Hack to say that this goal should not log `ex`, but instead keep
* it around. Set by a waitee which sees itself as the designated
* continuation of this goal, responsible for reporting its
* successes or failures.
*
* @todo this is yet another not-nice hack in the goal system that
* we ought to get rid of. See #11927
*/
bool preserveException = false;
/**
* Exception containing an error message, if any.
*/
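Taken together, the unlimited `Administration` category and the `preserveException` flag are what let a thin wrapper goal stand in for the goal it wraps. Here is a condensed restatement of the pattern from `DerivationCreationAndRealisationGoal::init()` earlier in this commit; the class name `WrapperGoal` is invented for illustration.

```cpp
// Hypothetical sketch; see the real coroutine in
// derivation-creation-and-realisation-goal.cc above.
Goal::Co WrapperGoal::init()
{
    auto inner = worker.makeDerivationGoal(drvPath, wantedOutputs, buildMode);

    // We see ourselves as the designated continuation of `inner`, so ask it
    // to keep its error instead of logging it; we will report the outcome.
    upcast_goal(inner)->preserveException = true;

    addWaitee(upcast_goal(inner));
    co_await Suspend{};

    // Forward the inner goal's verdict unchanged.
    co_return amDone(inner->exitCode, inner->ex);
}
```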

View file

@ -4,6 +4,7 @@
#include "substitution-goal.hh"
#include "drv-output-substitution-goal.hh"
#include "derivation-goal.hh"
#include "derivation-creation-and-realisation-goal.hh"
#ifndef _WIN32 // TODO Enable building on Windows
# include "local-derivation-goal.hh"
# include "hook-instance.hh"
@ -43,6 +44,24 @@ Worker::~Worker()
}
std::shared_ptr<DerivationCreationAndRealisationGoal> Worker::makeDerivationCreationAndRealisationGoal(
ref<SingleDerivedPath> drvReq,
const OutputsSpec & wantedOutputs,
BuildMode buildMode)
{
std::weak_ptr<DerivationCreationAndRealisationGoal> & goal_weak = outerDerivationGoals.ensureSlot(*drvReq).value;
std::shared_ptr<DerivationCreationAndRealisationGoal> goal = goal_weak.lock();
if (!goal) {
goal = std::make_shared<DerivationCreationAndRealisationGoal>(drvReq, wantedOutputs, *this, buildMode);
goal_weak = goal;
wakeUp(goal);
} else {
goal->addWantedOutputs(wantedOutputs);
}
return goal;
}
std::shared_ptr<DerivationGoal> Worker::makeDerivationGoalCommon(
const StorePath & drvPath,
const OutputsSpec & wantedOutputs,
@ -120,10 +139,7 @@ GoalPtr Worker::makeGoal(const DerivedPath & req, BuildMode buildMode)
{
return std::visit(overloaded {
[&](const DerivedPath::Built & bfd) -> GoalPtr {
if (auto bop = std::get_if<DerivedPath::Opaque>(&*bfd.drvPath))
return makeDerivationGoal(bop->path, bfd.outputs, buildMode);
else
throw UnimplementedError("Building dynamic derivations in one shot is not yet implemented.");
return makeDerivationCreationAndRealisationGoal(bfd.drvPath, bfd.outputs, buildMode);
},
[&](const DerivedPath::Opaque & bo) -> GoalPtr {
return makePathSubstitutionGoal(bo.path, buildMode == bmRepair ? Repair : NoRepair);
@ -132,24 +148,46 @@ GoalPtr Worker::makeGoal(const DerivedPath & req, BuildMode buildMode)
}
template<typename K, typename V, typename F>
static void cullMap(std::map<K, V> & goalMap, F f)
{
for (auto i = goalMap.begin(); i != goalMap.end();)
if (!f(i->second))
i = goalMap.erase(i);
else ++i;
}
template<typename K, typename G>
static void removeGoal(std::shared_ptr<G> goal, std::map<K, std::weak_ptr<G>> & goalMap)
{
/* !!! inefficient */
for (auto i = goalMap.begin();
i != goalMap.end(); )
if (i->second.lock() == goal) {
auto j = i; ++j;
goalMap.erase(i);
i = j;
}
else ++i;
cullMap(goalMap, [&](const std::weak_ptr<G> & gp) -> bool {
return gp.lock() != goal;
});
}
template<typename K>
static void removeGoal(std::shared_ptr<DerivationCreationAndRealisationGoal> goal, std::map<K, DerivedPathMap<std::weak_ptr<DerivationCreationAndRealisationGoal>>::ChildNode> & goalMap);
template<typename K>
static void removeGoal(std::shared_ptr<DerivationCreationAndRealisationGoal> goal, std::map<K, DerivedPathMap<std::weak_ptr<DerivationCreationAndRealisationGoal>>::ChildNode> & goalMap)
{
/* !!! inefficient */
cullMap(goalMap, [&](DerivedPathMap<std::weak_ptr<DerivationCreationAndRealisationGoal>>::ChildNode & node) -> bool {
if (node.value.lock() == goal)
node.value.reset();
removeGoal(goal, node.childMap);
return !node.value.expired() || !node.childMap.empty();
});
}
void Worker::removeGoal(GoalPtr goal)
{
if (auto drvGoal = std::dynamic_pointer_cast<DerivationGoal>(goal))
if (auto drvGoal = std::dynamic_pointer_cast<DerivationCreationAndRealisationGoal>(goal))
nix::removeGoal(drvGoal, outerDerivationGoals.map);
else if (auto drvGoal = std::dynamic_pointer_cast<DerivationGoal>(goal))
nix::removeGoal(drvGoal, derivationGoals);
else
if (auto subGoal = std::dynamic_pointer_cast<PathSubstitutionGoal>(goal))
@ -215,6 +253,9 @@ void Worker::childStarted(GoalPtr goal, const std::set<MuxablePipePollState::Com
case JobCategory::Build:
nrLocalBuilds++;
break;
case JobCategory::Administration:
/* Intentionally not limited, see docs */
break;
default:
unreachable();
}
@ -238,6 +279,9 @@ void Worker::childTerminated(Goal * goal, bool wakeSleepers)
assert(nrLocalBuilds > 0);
nrLocalBuilds--;
break;
case JobCategory::Administration:
/* Intentionally not limited, see docs */
break;
default:
unreachable();
}
@ -290,9 +334,9 @@ void Worker::run(const Goals & _topGoals)
for (auto & i : _topGoals) {
topGoals.insert(i);
if (auto goal = dynamic_cast<DerivationGoal *>(i.get())) {
if (auto goal = dynamic_cast<DerivationCreationAndRealisationGoal *>(i.get())) {
topPaths.push_back(DerivedPath::Built {
.drvPath = makeConstantStorePathRef(goal->drvPath),
.drvPath = goal->drvReq,
.outputs = goal->wantedOutputs,
});
} else
@ -552,4 +596,22 @@ GoalPtr upcast_goal(std::shared_ptr<DrvOutputSubstitutionGoal> subGoal)
return subGoal;
}
GoalPtr upcast_goal(std::shared_ptr<DerivationGoal> subGoal)
{
return subGoal;
}
std::optional<std::pair<std::reference_wrapper<const DerivationGoal>, std::reference_wrapper<const SingleDerivedPath>>> tryGetConcreteDrvGoal(GoalPtr waitee)
{
auto * odg = dynamic_cast<DerivationCreationAndRealisationGoal *>(&*waitee);
if (!odg) return std::nullopt;
/* If we failed to obtain the concrete drv, we won't have created
the concrete derivation goal. */
if (!odg->concreteDrvGoal) return std::nullopt;
return {{
std::cref(*odg->concreteDrvGoal),
std::cref(*odg->drvReq),
}};
}
}
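One detail worth noting in the goal-creation helpers above is the memoisation idiom they all share: the worker caches only `std::weak_ptr`s, so a goal is reused while something still holds it and silently recreated afterwards. A self-contained illustration of that idiom (plain standard library, not Nix code):

```cpp
#include <iostream>
#include <map>
#include <memory>
#include <string>

struct Goal { std::string key; };

// The cache holds weak references only, mirroring Worker::derivationGoals etc.
std::map<std::string, std::weak_ptr<Goal>> cache;

std::shared_ptr<Goal> makeGoal(const std::string & key)
{
    auto & slot = cache[key];
    if (auto existing = slot.lock())
        return existing;                        // reuse the goal while it is alive
    auto fresh = std::make_shared<Goal>(Goal{key});
    slot = fresh;                               // remember it, but only weakly
    return fresh;
}

int main()
{
    auto a = makeGoal("hello.drv");
    auto b = makeGoal("hello.drv");
    std::cout << (a == b) << '\n';              // prints 1: same goal object
}
```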

View file

@ -3,6 +3,7 @@
#include "types.hh"
#include "store-api.hh"
#include "derived-path-map.hh"
#include "goal.hh"
#include "realisation.hh"
#include "muxable-pipe.hh"
@ -13,6 +14,7 @@
namespace nix {
/* Forward definition. */
struct DerivationCreationAndRealisationGoal;
struct DerivationGoal;
struct PathSubstitutionGoal;
class DrvOutputSubstitutionGoal;
@ -31,9 +33,25 @@ class DrvOutputSubstitutionGoal;
*/
GoalPtr upcast_goal(std::shared_ptr<PathSubstitutionGoal> subGoal);
GoalPtr upcast_goal(std::shared_ptr<DrvOutputSubstitutionGoal> subGoal);
GoalPtr upcast_goal(std::shared_ptr<DerivationGoal> subGoal);
typedef std::chrono::time_point<std::chrono::steady_clock> steady_time_point;
/**
* The current implementation of impure derivations has
* `DerivationGoal`s accumulate realisations from their waitees.
* Unfortunately, `DerivationGoal`s don't directly depend on other
* `DerivationGoal`s, but instead depend on `DerivationCreationAndRealisationGoal`s.
*
* We try not to share any of the details of any goal type with any
* other, for sake of modularity and quicker rebuilds. This means we
* cannot "just" downcast and fish out the field. So as an escape hatch,
* we provide this function, written in `worker.cc` where all the goal
* types are visible, and use it instead.
*/
std::optional<std::pair<std::reference_wrapper<const DerivationGoal>, std::reference_wrapper<const SingleDerivedPath>>> tryGetConcreteDrvGoal(GoalPtr waitee);
/**
* A mapping used to remember for each child process to what goal it
* belongs, and comm channels for receiving log data and output
@ -103,6 +121,9 @@ private:
* Maps used to prevent multiple instantiations of a goal for the
* same derivation / path.
*/
DerivedPathMap<std::weak_ptr<DerivationCreationAndRealisationGoal>> outerDerivationGoals;
std::map<StorePath, std::weak_ptr<DerivationGoal>> derivationGoals;
std::map<StorePath, std::weak_ptr<PathSubstitutionGoal>> substitutionGoals;
std::map<DrvOutput, std::weak_ptr<DrvOutputSubstitutionGoal>> drvOutputSubstitutionGoals;
@ -196,6 +217,9 @@ public:
* @ref DerivationGoal "derivation goal"
*/
private:
std::shared_ptr<DerivationCreationAndRealisationGoal> makeDerivationCreationAndRealisationGoal(
ref<SingleDerivedPath> drvPath,
const OutputsSpec & wantedOutputs, BuildMode buildMode = bmNormal);
std::shared_ptr<DerivationGoal> makeDerivationGoalCommon(
const StorePath & drvPath, const OutputsSpec & wantedOutputs,
std::function<std::shared_ptr<DerivationGoal>()> mkDrvGoal);

View file

@ -1041,11 +1041,15 @@ void processConnection(
conn.protoVersion = protoVersion;
conn.features = features;
auto tunnelLogger = new TunnelLogger(conn.to, protoVersion);
auto prevLogger = nix::logger;
auto tunnelLogger_ = std::make_unique<TunnelLogger>(conn.to, protoVersion);
auto tunnelLogger = tunnelLogger_.get();
std::unique_ptr<Logger> prevLogger_;
auto prevLogger = logger.get();
// FIXME
if (!recursive)
logger = tunnelLogger;
if (!recursive) {
prevLogger_ = std::move(logger);
logger = std::move(tunnelLogger_);
}
unsigned int opCount = 0;

View file

@ -0,0 +1,274 @@
#include "derivation-options.hh"
#include "json-utils.hh"
#include "parsed-derivations.hh"
#include "types.hh"
#include "util.hh"
#include <optional>
#include <string>
#include <variant>
namespace nix {
using OutputChecks = DerivationOptions::OutputChecks;
using OutputChecksVariant = std::variant<OutputChecks, std::map<std::string, OutputChecks>>;
DerivationOptions DerivationOptions::fromParsedDerivation(const ParsedDerivation & parsed, bool shouldWarn)
{
DerivationOptions defaults = {};
auto structuredAttrs = parsed.structuredAttrs.get();
if (shouldWarn && structuredAttrs) {
if (get(*structuredAttrs, "allowedReferences")) {
warn(
"'structuredAttrs' disables the effect of the top-level attribute 'allowedReferences'; use 'outputChecks' instead");
}
if (get(*structuredAttrs, "allowedRequisites")) {
warn(
"'structuredAttrs' disables the effect of the top-level attribute 'allowedRequisites'; use 'outputChecks' instead");
}
if (get(*structuredAttrs, "disallowedRequisites")) {
warn(
"'structuredAttrs' disables the effect of the top-level attribute 'disallowedRequisites'; use 'outputChecks' instead");
}
if (get(*structuredAttrs, "disallowedReferences")) {
warn(
"'structuredAttrs' disables the effect of the top-level attribute 'disallowedReferences'; use 'outputChecks' instead");
}
if (get(*structuredAttrs, "maxSize")) {
warn(
"'structuredAttrs' disables the effect of the top-level attribute 'maxSize'; use 'outputChecks' instead");
}
if (get(*structuredAttrs, "maxClosureSize")) {
warn(
"'structuredAttrs' disables the effect of the top-level attribute 'maxClosureSize'; use 'outputChecks' instead");
}
}
return {
.outputChecks = [&]() -> OutputChecksVariant {
if (auto structuredAttrs = parsed.structuredAttrs.get()) {
std::map<std::string, OutputChecks> res;
if (auto outputChecks = get(*structuredAttrs, "outputChecks")) {
for (auto & [outputName, output] : getObject(*outputChecks)) {
OutputChecks checks;
if (auto maxSize = get(output, "maxSize"))
checks.maxSize = maxSize->get<uint64_t>();
if (auto maxClosureSize = get(output, "maxClosureSize"))
checks.maxClosureSize = maxClosureSize->get<uint64_t>();
auto get_ = [&](const std::string & name) -> std::optional<StringSet> {
if (auto i = get(output, name)) {
StringSet res;
for (auto j = i->begin(); j != i->end(); ++j) {
if (!j->is_string())
throw Error("attribute '%s' must be a list of strings", name);
res.insert(j->get<std::string>());
}
return res;
}
return {};
};
checks.allowedReferences = get_("allowedReferences");
checks.allowedRequisites = get_("allowedRequisites");
checks.disallowedReferences = get_("disallowedReferences").value_or(StringSet{});
checks.disallowedRequisites = get_("disallowedRequisites").value_or(StringSet{});
res.insert_or_assign(outputName, std::move(checks));
}
}
return res;
} else {
return OutputChecks{
// legacy non-structured-attributes case
.ignoreSelfRefs = true,
.allowedReferences = parsed.getStringSetAttr("allowedReferences"),
.disallowedReferences = parsed.getStringSetAttr("disallowedReferences").value_or(StringSet{}),
.allowedRequisites = parsed.getStringSetAttr("allowedRequisites"),
.disallowedRequisites = parsed.getStringSetAttr("disallowedRequisites").value_or(StringSet{}),
};
}
}(),
.unsafeDiscardReferences =
[&] {
std::map<std::string, bool> res;
if (auto structuredAttrs = parsed.structuredAttrs.get()) {
if (auto udr = get(*structuredAttrs, "unsafeDiscardReferences")) {
for (auto & [outputName, output] : getObject(*udr)) {
if (!output.is_boolean())
throw Error("attribute 'unsafeDiscardReferences.\"%s\"' must be a Boolean", outputName);
res.insert_or_assign(outputName, output.get<bool>());
}
}
}
return res;
}(),
.passAsFile =
[&] {
StringSet res;
if (auto * passAsFileString = get(parsed.drv.env, "passAsFile")) {
if (parsed.hasStructuredAttrs()) {
if (shouldWarn) {
warn(
"'structuredAttrs' disables the effect of the top-level attribute 'passAsFile'; because all JSON is always passed via file");
}
} else {
res = tokenizeString<StringSet>(*passAsFileString);
}
}
return res;
}(),
.additionalSandboxProfile =
parsed.getStringAttr("__sandboxProfile").value_or(defaults.additionalSandboxProfile),
.noChroot = parsed.getBoolAttr("__noChroot", defaults.noChroot),
.impureHostDeps = parsed.getStringSetAttr("__impureHostDeps").value_or(defaults.impureHostDeps),
.impureEnvVars = parsed.getStringSetAttr("impureEnvVars").value_or(defaults.impureEnvVars),
.allowLocalNetworking = parsed.getBoolAttr("__darwinAllowLocalNetworking", defaults.allowLocalNetworking),
.requiredSystemFeatures =
parsed.getStringSetAttr("requiredSystemFeatures").value_or(defaults.requiredSystemFeatures),
.preferLocalBuild = parsed.getBoolAttr("preferLocalBuild", defaults.preferLocalBuild),
.allowSubstitutes = parsed.getBoolAttr("allowSubstitutes", defaults.allowSubstitutes),
};
}
StringSet DerivationOptions::getRequiredSystemFeatures(const BasicDerivation & drv) const
{
// FIXME: cache this?
StringSet res;
for (auto & i : requiredSystemFeatures)
res.insert(i);
if (!drv.type().hasKnownOutputPaths())
res.insert("ca-derivations");
return res;
}
bool DerivationOptions::canBuildLocally(Store & localStore, const BasicDerivation & drv) const
{
if (drv.platform != settings.thisSystem.get() && !settings.extraPlatforms.get().count(drv.platform)
&& !drv.isBuiltin())
return false;
if (settings.maxBuildJobs.get() == 0 && !drv.isBuiltin())
return false;
for (auto & feature : getRequiredSystemFeatures(drv))
if (!localStore.systemFeatures.get().count(feature))
return false;
return true;
}
bool DerivationOptions::willBuildLocally(Store & localStore, const BasicDerivation & drv) const
{
return preferLocalBuild && canBuildLocally(localStore, drv);
}
bool DerivationOptions::substitutesAllowed() const
{
return settings.alwaysAllowSubstitutes ? true : allowSubstitutes;
}
bool DerivationOptions::useUidRange(const BasicDerivation & drv) const
{
return getRequiredSystemFeatures(drv).count("uid-range");
}
}
namespace nlohmann {
using namespace nix;
DerivationOptions adl_serializer<DerivationOptions>::from_json(const json & json)
{
return {
.outputChecks = [&]() -> OutputChecksVariant {
auto outputChecks = getObject(valueAt(json, "outputChecks"));
auto forAllOutputsOpt = optionalValueAt(outputChecks, "forAllOutputs");
auto perOutputOpt = optionalValueAt(outputChecks, "perOutput");
if (forAllOutputsOpt && !perOutputOpt) {
return static_cast<OutputChecks>(*forAllOutputsOpt);
} else if (perOutputOpt && !forAllOutputsOpt) {
return static_cast<std::map<std::string, OutputChecks>>(*perOutputOpt);
} else {
throw Error("Exactly one of 'perOutput' or 'forAllOutputs' is required");
}
}(),
.unsafeDiscardReferences = valueAt(json, "unsafeDiscardReferences"),
.passAsFile = getStringSet(valueAt(json, "passAsFile")),
.additionalSandboxProfile = getString(valueAt(json, "additionalSandboxProfile")),
.noChroot = getBoolean(valueAt(json, "noChroot")),
.impureHostDeps = getStringSet(valueAt(json, "impureHostDeps")),
.impureEnvVars = getStringSet(valueAt(json, "impureEnvVars")),
.allowLocalNetworking = getBoolean(valueAt(json, "allowLocalNetworking")),
.requiredSystemFeatures = getStringSet(valueAt(json, "requiredSystemFeatures")),
.preferLocalBuild = getBoolean(valueAt(json, "preferLocalBuild")),
.allowSubstitutes = getBoolean(valueAt(json, "allowSubstitutes")),
};
}
void adl_serializer<DerivationOptions>::to_json(json & json, DerivationOptions o)
{
json["outputChecks"] = std::visit(
overloaded{
[&](const OutputChecks & checks) {
nlohmann::json outputChecks;
outputChecks["forAllOutputs"] = checks;
return outputChecks;
},
[&](const std::map<std::string, OutputChecks> & checksPerOutput) {
nlohmann::json outputChecks;
outputChecks["perOutput"] = checksPerOutput;
return outputChecks;
},
},
o.outputChecks);
json["unsafeDiscardReferences"] = o.unsafeDiscardReferences;
json["passAsFile"] = o.passAsFile;
json["additionalSandboxProfile"] = o.additionalSandboxProfile;
json["noChroot"] = o.noChroot;
json["impureHostDeps"] = o.impureHostDeps;
json["impureEnvVars"] = o.impureEnvVars;
json["allowLocalNetworking"] = o.allowLocalNetworking;
json["requiredSystemFeatures"] = o.requiredSystemFeatures;
json["preferLocalBuild"] = o.preferLocalBuild;
json["allowSubstitutes"] = o.allowSubstitutes;
}
DerivationOptions::OutputChecks adl_serializer<DerivationOptions::OutputChecks>::from_json(const json & json)
{
return {
.ignoreSelfRefs = getBoolean(valueAt(json, "ignoreSelfRefs")),
.allowedReferences = nullableValueAt(json, "allowedReferences"),
.disallowedReferences = getStringSet(valueAt(json, "disallowedReferences")),
.allowedRequisites = nullableValueAt(json, "allowedRequisites"),
.disallowedRequisites = getStringSet(valueAt(json, "disallowedRequisites")),
};
}
void adl_serializer<DerivationOptions::OutputChecks>::to_json(json & json, DerivationOptions::OutputChecks c)
{
json["ignoreSelfRefs"] = c.ignoreSelfRefs;
json["allowedReferences"] = c.allowedReferences;
json["disallowedReferences"] = c.disallowedReferences;
json["allowedRequisites"] = c.allowedRequisites;
json["disallowedRequisites"] = c.disallowedRequisites;
}
}
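For orientation, this is roughly how the scheduler consumes the result of `fromParsedDerivation` (a fragment modelled on the unit tests earlier in this commit, not compilable on its own; `drvPath` and `drv` are assumed to hold an already-loaded derivation):

```cpp
ParsedDerivation parsed(drvPath, drv);
DerivationOptions options = DerivationOptions::fromParsedDerivation(parsed);

if (auto * forAll = std::get_if<DerivationOptions::OutputChecks>(&options.outputChecks)) {
    // Legacy attributes: one set of checks applied to every output.
    if (forAll->allowedReferences) {
        // enforce the allow-list against the scanned references
    }
} else {
    // __structuredAttrs: per-output checks keyed by output name.
    auto & perOutput =
        std::get<std::map<std::string, DerivationOptions::OutputChecks>>(options.outputChecks);
    (void) perOutput;
}
```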

View file

@ -0,0 +1,185 @@
#pragma once
///@file
#include <cstdint>
#include <nlohmann/json.hpp>
#include <optional>
#include <variant>
#include "types.hh"
#include "json-impls.hh"
namespace nix {
class Store;
struct BasicDerivation;
class ParsedDerivation;
/**
* This represents all the special options on a `Derivation`.
*
* Currently, these options are parsed from the environment variables
* with the aid of `ParsedDerivation`.
*
* The first goal of this data type is to make sure that no other code
* uses `ParsedDerivation` to ad-hoc parse some additional options. That
* ensures this data type is up to date and fully correct.
*
* The second goal of this data type is to allow an alternative to
* hackily parsing the options from the environment variables. The ATerm
* format cannot change, but in alternatives to it (like the JSON
* format), we have the option of instead storing the options
* separately. That would nicely separate concerns, and avoid making any
* environment variable names magical.
*/
struct DerivationOptions
{
struct OutputChecks
{
bool ignoreSelfRefs = false;
std::optional<uint64_t> maxSize, maxClosureSize;
/**
* env: allowedReferences
*
* A value of `nullopt` indicates that the check is skipped.
* This means that all references are allowed.
*/
std::optional<StringSet> allowedReferences;
/**
* env: disallowedReferences
*
* No need for `std::optional`, because skipping the check is
* the same as disallowing the references.
*/
StringSet disallowedReferences;
/**
* env: allowedRequisites
*
* See `allowedReferences`
*/
std::optional<StringSet> allowedRequisites;
/**
* env: disallowedRequisites
*
* See `disallowedReferences`
*/
StringSet disallowedRequisites;
bool operator==(const OutputChecks &) const = default;
};
/**
* Either one set of checks for all outputs, or separate checks
* per-output.
*/
std::variant<OutputChecks, std::map<std::string, OutputChecks>> outputChecks = OutputChecks{};
/**
* Whether to avoid scanning for references for a given output.
*/
std::map<std::string, bool> unsafeDiscardReferences;
/**
* In non-structured mode, all bindings specified in the derivation
* go directly via the environment, except those listed in the
* passAsFile attribute. Those are instead passed as file names
* pointing to temporary files containing the contents.
*
* Note that passAsFile is ignored in structured mode because it's
* not needed (attributes are not passed through the environment, so
* there is no size constraint).
*/
StringSet passAsFile;
/**
* env: __sandboxProfile
*
* Just for Darwin
*/
std::string additionalSandboxProfile = "";
/**
* env: __noChroot
*
* Derivation would like to opt out of the sandbox.
*
* Builder is free to not respect this wish (because it is
* insecure) and fail the build instead.
*/
bool noChroot = false;
/**
* env: __impureHostDeps
*/
StringSet impureHostDeps = {};
/**
* env: impureEnvVars
*/
StringSet impureEnvVars = {};
/**
* env: __darwinAllowLocalNetworking
*
* Just for Darwin
*/
bool allowLocalNetworking = false;
/**
* env: requiredSystemFeatures
*/
StringSet requiredSystemFeatures = {};
/**
* env: preferLocalBuild
*/
bool preferLocalBuild = false;
/**
* env: allowSubstitutes
*/
bool allowSubstitutes = true;
bool operator==(const DerivationOptions &) const = default;
/**
* Parse this information from its legacy encoding as part of the
* environment. This should not be used with nice greenfield formats
* (e.g. JSON) but is necessary for supporting old formats (e.g.
* ATerm).
*/
static DerivationOptions fromParsedDerivation(const ParsedDerivation & parsed, bool shouldWarn = true);
/**
* @param drv Must be the same derivation we parsed this from. In
* the future we'll flip things around so a `BasicDerivation` has
* `DerivationOptions` instead.
*/
StringSet getRequiredSystemFeatures(const BasicDerivation & drv) const;
/**
* @param drv See note on `getRequiredSystemFeatures`
*/
bool canBuildLocally(Store & localStore, const BasicDerivation & drv) const;
/**
* @param drv See note on `getRequiredSystemFeatures`
*/
bool willBuildLocally(Store & localStore, const BasicDerivation & drv) const;
bool substitutesAllowed() const;
/**
* @param drv See note on `getRequiredSystemFeatures`
*/
bool useUidRange(const BasicDerivation & drv) const;
};
};
JSON_IMPL(DerivationOptions);
JSON_IMPL(DerivationOptions::OutputChecks)
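The `JSON_IMPL` declarations above, together with the `adl_serializer` definitions in `derivation-options.cc`, give `DerivationOptions` a nlohmann round-trip. A small test-style sketch of the property this is meant to provide (assuming the headers above):

```cpp
#include <cassert>
#include <nlohmann/json.hpp>

void roundTrip(const DerivationOptions & options)
{
    nlohmann::json j = options;                  // adl_serializer<DerivationOptions>::to_json
    auto options2 = j.get<DerivationOptions>();  // adl_serializer<DerivationOptions>::from_json
    assert(options == options2);                 // operator== is defaulted above
}
```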

View file

@ -52,6 +52,7 @@ typename DerivedPathMap<V>::ChildNode * DerivedPathMap<V>::findSlot(const Single
// instantiations
#include "derivation-creation-and-realisation-goal.hh"
namespace nix {
template<>
@ -68,4 +69,7 @@ std::strong_ordering DerivedPathMap<std::set<std::string>>::ChildNode::operator
template struct DerivedPathMap<std::set<std::string>>::ChildNode;
template struct DerivedPathMap<std::set<std::string>>;
template struct DerivedPathMap<std::weak_ptr<DerivationCreationAndRealisationGoal>>;
};

View file

@ -21,8 +21,11 @@ namespace nix {
*
* @param V A type to instantiate for each output. It probably should
* be an "optional" type so not every interior node has to have a
* value. `* const Something` or `std::optional<Something>` would be
* good choices for "optional" types.
* value. For example, the scheduler uses
* `DerivedPathMap<std::weak_ptr<DerivationCreationAndRealisationGoal>>` to
* remember which goals correspond to which outputs. `* const Something`
* or `std::optional<Something>` would also be good choices for
* "optional" types.
*/
template<typename V>
struct DerivedPathMap {
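The scheduler use mentioned in the comment looks roughly like this (a fragment condensed from the `worker.cc` hunks in this commit; it assumes those headers and is not compilable on its own):

```cpp
void example(
    DerivedPathMap<std::weak_ptr<DerivationCreationAndRealisationGoal>> & outerGoals,
    const SingleDerivedPath & drvReq,
    const OutputsSpec & wantedOutputs)
{
    // Each interior node carries an "optional" value: an expired weak_ptr
    // simply means "no goal for this (sub)derivation right now".
    auto & slot = outerGoals.ensureSlot(drvReq).value; // creates interior nodes on demand
    if (auto goal = slot.lock())
        goal->addWantedOutputs(wantedOutputs);         // reuse the live goal
}
```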

View file

@ -94,7 +94,7 @@ struct curlFileTransfer : public FileTransfer
: fileTransfer(fileTransfer)
, request(request)
, act(*logger, lvlTalkative, actFileTransfer,
fmt(request.data ? "uploading '%s'" : "downloading '%s'", request.uri),
request.post ? "" : fmt(request.data ? "uploading '%s'" : "downloading '%s'", request.uri),
{request.uri}, request.parentAct)
, callback(std::move(callback))
, finalSink([this](std::string_view data) {
@ -271,11 +271,21 @@ struct curlFileTransfer : public FileTransfer
return getInterrupted();
}
int silentProgressCallback(double dltotal, double dlnow)
{
return getInterrupted();
}
static int progressCallbackWrapper(void * userp, double dltotal, double dlnow, double ultotal, double ulnow)
{
return ((TransferItem *) userp)->progressCallback(dltotal, dlnow);
}
static int silentProgressCallbackWrapper(void * userp, double dltotal, double dlnow, double ultotal, double ulnow)
{
return ((TransferItem *) userp)->silentProgressCallback(dltotal, dlnow);
}
static int debugCallback(CURL * handle, curl_infotype type, char * data, size_t size, void * userptr)
{
if (type == CURLINFO_TEXT)
@ -340,8 +350,11 @@ struct curlFileTransfer : public FileTransfer
curl_easy_setopt(req, CURLOPT_HEADERFUNCTION, TransferItem::headerCallbackWrapper);
curl_easy_setopt(req, CURLOPT_HEADERDATA, this);
curl_easy_setopt(req, CURLOPT_PROGRESSFUNCTION, progressCallbackWrapper);
curl_easy_setopt(req, CURLOPT_PROGRESSDATA, this);
if (request.post)
curl_easy_setopt(req, CURLOPT_XFERINFOFUNCTION, silentProgressCallbackWrapper);
else
curl_easy_setopt(req, CURLOPT_XFERINFOFUNCTION, progressCallbackWrapper);
curl_easy_setopt(req, CURLOPT_XFERINFODATA, this);
curl_easy_setopt(req, CURLOPT_NOPROGRESS, 0);
curl_easy_setopt(req, CURLOPT_HTTPHEADER, requestHeaders);
@ -353,7 +366,10 @@ struct curlFileTransfer : public FileTransfer
curl_easy_setopt(req, CURLOPT_NOBODY, 1);
if (request.data) {
curl_easy_setopt(req, CURLOPT_UPLOAD, 1L);
if (request.post)
curl_easy_setopt(req, CURLOPT_POST, 1L);
else
curl_easy_setopt(req, CURLOPT_UPLOAD, 1L);
curl_easy_setopt(req, CURLOPT_READFUNCTION, readCallbackWrapper);
curl_easy_setopt(req, CURLOPT_READDATA, this);
curl_easy_setopt(req, CURLOPT_INFILESIZE_LARGE, (curl_off_t) request.data->length());
@ -430,7 +446,8 @@ struct curlFileTransfer : public FileTransfer
if (httpStatus == 304 && result.etag == "")
result.etag = request.expectedETag;
act.progress(result.bodySize, result.bodySize);
if (!request.post)
act.progress(result.bodySize, result.bodySize);
done = true;
callback(std::move(result));
}

View file

@ -65,6 +65,7 @@ struct FileTransferRequest
std::string expectedETag;
bool verifyTLS = true;
bool head = false;
bool post = false;
size_t tries = fileTransferSettings.tries;
unsigned int baseRetryTimeMs = 250;
ActivityId parentAct;
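The new `post` flag is what the Git LFS batch request added elsewhere in this commit relies on: it switches curl to `CURLOPT_POST` and suppresses the per-transfer progress activity. A hypothetical fragment; the `FileTransferRequest(uri)` constructor and the `getFileTransfer()`/`download()` entry points are assumed from the existing API, and the URL and body are made up:

```cpp
FileTransferRequest req("https://example.org/repo.git/info/lfs/objects/batch");
req.post = true;                                 // POST instead of PUT, no "downloading" activity
req.data = std::string(R"({"operation":"download","transfers":["basic"],"objects":[]})");
auto result = getFileTransfer()->download(req);  // assumed existing synchronous entry point
```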

View file

@ -65,7 +65,6 @@ Settings::Settings()
, nixStateDir(canonPath(getEnvNonEmpty("NIX_STATE_DIR").value_or(NIX_STATE_DIR)))
, nixConfDir(canonPath(getEnvNonEmpty("NIX_CONF_DIR").value_or(NIX_CONF_DIR)))
, nixUserConfFiles(getUserConfigFiles())
, nixManDir(canonPath(NIX_MAN_DIR))
, nixDaemonSocketFile(canonPath(getEnvNonEmpty("NIX_DAEMON_SOCKET_PATH").value_or(nixStateDir + DEFAULT_SOCKET_PATH)))
{
#ifndef _WIN32
@ -243,7 +242,7 @@ Path Settings::getDefaultSSLCertFile()
return "";
}
const std::string nixVersion = PACKAGE_VERSION;
std::string nixVersion = PACKAGE_VERSION;
NLOHMANN_JSON_SERIALIZE_ENUM(SandboxMode, {
{SandboxMode::smEnabled, true},

View file

@ -84,11 +84,6 @@ public:
*/
std::vector<Path> nixUserConfFiles;
/**
* The directory where the man pages are stored.
*/
Path nixManDir;
/**
* File name of the socket the daemon listens to.
*/
@ -1064,7 +1059,10 @@ public:
1. `NIX_SSL_CERT_FILE`
2. `SSL_CERT_FILE`
)"};
)",
{},
// Don't document the machine-specific default value
false};
#if __linux__
Setting<bool> filterSyscalls{
@ -1253,7 +1251,15 @@ void loadConfFile(AbstractConfig & config);
// Used by the Settings constructor
std::vector<Path> getUserConfigFiles();
extern const std::string nixVersion;
/**
* The version of Nix itself.
*
* This is not `const`, so that the Nix CLI can provide a more detailed version
* number including the git revision, without having to "re-compile" the entire
* set of Nix libraries to include that version, even when those libraries are
* not affected by the change.
*/
extern std::string nixVersion;
/**
* @param loadConfig Whether to load configuration from `nix.conf`, `NIX_CONFIG`, etc. May be disabled for unit tests.

View file

@ -69,7 +69,10 @@ ref<LegacySSHStore::Connection> LegacySSHStore::openConnection()
command.push_back("--store");
command.push_back(remoteStore.get());
}
conn->sshConn = master.startCommand(std::move(command));
conn->sshConn = master.startCommand(std::move(command), std::list{extraSshArgs});
if (connPipeSize) {
conn->sshConn->trySetBufferSize(*connPipeSize);
}
conn->to = FdSink(conn->sshConn->in.get());
conn->from = FdSource(conn->sshConn->out.get());
@ -100,19 +103,31 @@ std::string LegacySSHStore::getUri()
return *uriSchemes().begin() + "://" + host;
}
std::map<StorePath, UnkeyedValidPathInfo> LegacySSHStore::queryPathInfosUncached(
const StorePathSet & paths)
{
auto conn(connections->get());
/* No longer support missing NAR hash */
assert(GET_PROTOCOL_MINOR(conn->remoteVersion) >= 4);
debug("querying remote host '%s' for info on '%s'", host, concatStringsSep(", ", printStorePathSet(paths)));
auto infos = conn->queryPathInfos(*this, paths);
for (const auto & [_, info] : infos) {
if (info.narHash == Hash::dummy)
throw Error("NAR hash is now mandatory");
}
return infos;
}
void LegacySSHStore::queryPathInfoUncached(const StorePath & path,
Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept
{
try {
auto conn(connections->get());
/* No longer support missing NAR hash */
assert(GET_PROTOCOL_MINOR(conn->remoteVersion) >= 4);
debug("querying remote host '%s' for info on '%s'", host, printStorePath(path));
auto infos = conn->queryPathInfos(*this, {path});
auto infos = queryPathInfosUncached({path});
switch (infos.size()) {
case 0:
@ -120,9 +135,6 @@ void LegacySSHStore::queryPathInfoUncached(const StorePath & path,
case 1: {
auto & [path2, info] = *infos.begin();
if (info.narHash == Hash::dummy)
throw Error("NAR hash is now mandatory");
assert(path == path2);
return callback(std::make_shared<ValidPathInfo>(
std::move(path),
@ -193,13 +205,19 @@ void LegacySSHStore::addToStore(const ValidPathInfo & info, Source & source,
void LegacySSHStore::narFromPath(const StorePath & path, Sink & sink)
{
auto conn(connections->get());
conn->narFromPath(*this, path, [&](auto & source) {
narFromPath(path, [&](auto & source) {
copyNAR(source, sink);
});
}
void LegacySSHStore::narFromPath(const StorePath & path, std::function<void(Source &)> fun)
{
auto conn(connections->get());
conn->narFromPath(*this, path, fun);
}
static ServeProto::BuildOptions buildSettings()
{
return {
@ -223,6 +241,19 @@ BuildResult LegacySSHStore::buildDerivation(const StorePath & drvPath, const Bas
return conn->getBuildDerivationResponse(*this);
}
std::function<BuildResult()> LegacySSHStore::buildDerivationAsync(
const StorePath & drvPath, const BasicDerivation & drv,
const ServeProto::BuildOptions & options)
{
// Until we have C++23 std::move_only_function
auto conn = std::make_shared<Pool<Connection>::Handle>(connections->get());
(*conn)->putBuildDerivationRequest(*this, drvPath, drv, options);
return [this,conn]() -> BuildResult {
return (*conn)->getBuildDerivationResponse(*this);
};
}
void LegacySSHStore::buildPaths(const std::vector<DerivedPath> & drvPaths, BuildMode buildMode, std::shared_ptr<Store> evalStore)
{
@ -294,6 +325,32 @@ StorePathSet LegacySSHStore::queryValidPaths(const StorePathSet & paths,
}
StorePathSet LegacySSHStore::queryValidPaths(const StorePathSet & paths,
bool lock, SubstituteFlag maybeSubstitute)
{
auto conn(connections->get());
return conn->queryValidPaths(*this,
lock, paths, maybeSubstitute);
}
void LegacySSHStore::addMultipleToStoreLegacy(Store & srcStore, const StorePathSet & paths)
{
auto conn(connections->get());
conn->to << ServeProto::Command::ImportPaths;
try {
srcStore.exportPaths(paths, conn->to);
} catch (...) {
conn->good = false;
throw;
}
conn->to.flush();
if (readInt(conn->from) != 1)
throw Error("remote machine failed to import closure");
}
void LegacySSHStore::connect()
{
auto conn(connections->get());
@ -307,6 +364,28 @@ unsigned int LegacySSHStore::getProtocol()
}
pid_t LegacySSHStore::getConnectionPid()
{
auto conn(connections->get());
#ifndef _WIN32
return conn->sshConn->sshPid;
#else
// TODO: Implement
return 0;
#endif
}
LegacySSHStore::ConnectionStats LegacySSHStore::getConnectionStats()
{
auto conn(connections->get());
return {
.bytesReceived = conn->from.read,
.bytesSent = conn->to.written,
};
}
/**
* The legacy ssh protocol doesn't support checking for trusted-user.
* Try using ssh-ng:// instead if you want to know.

View file

@ -6,6 +6,7 @@
#include "ssh.hh"
#include "callback.hh"
#include "pool.hh"
#include "serve-protocol.hh"
namespace nix {
@ -24,6 +25,16 @@ struct LegacySSHStoreConfig : virtual CommonSSHStoreConfig
const Setting<int> maxConnections{this, 1, "max-connections",
"Maximum number of concurrent SSH connections."};
/**
* Hack for Hydra
*/
Strings extraSshArgs = {};
/**
* Exposed for Hydra
*/
std::optional<size_t> connPipeSize;
const std::string name() override { return "SSH Store"; }
static std::set<std::string> uriSchemes() { return {"ssh"}; }
@ -60,11 +71,24 @@ struct LegacySSHStore : public virtual LegacySSHStoreConfig, public virtual Stor
void queryPathInfoUncached(const StorePath & path,
Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept override;
std::map<StorePath, UnkeyedValidPathInfo> queryPathInfosUncached(
const StorePathSet & paths);
void addToStore(const ValidPathInfo & info, Source & source,
RepairFlag repair, CheckSigsFlag checkSigs) override;
void narFromPath(const StorePath & path, Sink & sink) override;
/**
* Hands over the connection temporarily as source to the given
* function. The function must not consume beyond the NAR; it
* cannot just blindly keep reading bytes until it is cut off.
*
* This is exposed for the sake of Hydra.
*/
void narFromPath(const StorePath & path, std::function<void(Source &)> fun);
std::optional<StorePath> queryPathFromHashPart(const std::string & hashPart) override
{ unsupported("queryPathFromHashPart"); }
@ -93,6 +117,16 @@ public:
BuildResult buildDerivation(const StorePath & drvPath, const BasicDerivation & drv,
BuildMode buildMode) override;
/**
* Note: the returned function must only be called once, or we'll
* try to read from the connection twice.
*
* @todo Use C++23 `std::move_only_function`.
*/
std::function<BuildResult()> buildDerivationAsync(
const StorePath & drvPath, const BasicDerivation & drv,
const ServeProto::BuildOptions & options);
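
A rough usage sketch, under the assumption that `store` is an already-connected `LegacySSHStore` and that `drvPath`, `drv` and `options` have been prepared by the caller:

```cpp
// The request is written to the connection immediately; the returned closure
// owns the connection handle and reads the response when invoked.
auto getResult = store->buildDerivationAsync(drvPath, drv, options);

// ... schedule other work while the remote side builds ...

// Call exactly once (see the note above): a second call would try to read
// a response that has already been consumed.
BuildResult result = getResult();
if (!result.success())
    printError("remote build failed: %s", result.errorMsg);
```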
void buildPaths(const std::vector<DerivedPath> & drvPaths, BuildMode buildMode, std::shared_ptr<Store> evalStore) override;
void ensurePath(const StorePath & path) override
@ -119,10 +153,36 @@ public:
StorePathSet queryValidPaths(const StorePathSet & paths,
SubstituteFlag maybeSubstitute = NoSubstitute) override;
/**
* Custom variation that atomically creates temp locks on the remote
* side.
*
* This exists to prevent a race where the remote host
* garbage-collects paths that are already there. Optionally, ask
* the remote host to substitute missing paths.
*/
StorePathSet queryValidPaths(const StorePathSet & paths,
bool lock,
SubstituteFlag maybeSubstitute = NoSubstitute);
/**
* This exists only because it is exactly what Hydra was doing, and we
* don't yet want an algorithmic change.
*/
void addMultipleToStoreLegacy(Store & srcStore, const StorePathSet & paths);
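
Taken together, these two members support the Hydra-style copy flow sketched below; the variable names and the closure computation are assumptions, not part of the API:

```cpp
// Lock the wanted paths on the remote so they can't be garbage-collected
// while we work, and let the remote substitute whatever it can.
StorePathSet present = remote->queryValidPaths(closure, /*lock=*/true, Substitute);

// Whatever is still missing gets exported over the same `serve` connection.
StorePathSet missing;
for (auto & path : closure)
    if (!present.count(path))
        missing.insert(path);

remote->addMultipleToStoreLegacy(*localStore, missing);
```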
void connect() override;
unsigned int getProtocol() override;
struct ConnectionStats {
size_t bytesReceived, bytesSent;
};
ConnectionStats getConnectionStats();
pid_t getConnectionPid();
/**
* The legacy ssh protocol doesn't support checking for trusted-user.
* Try using ssh-ng:// instead if you want to know.

View file

@ -136,7 +136,12 @@ LocalStore::LocalStore(
for (auto & perUserDir : {profilesDir + "/per-user", gcRootsDir + "/per-user"}) {
createDirs(perUserDir);
if (!readOnly) {
if (chmod(perUserDir.c_str(), 0755) == -1)
auto st = lstat(perUserDir);
// Skip chmod call if the directory already has the correct permissions (0755).
// This is to avoid failing when the executing user lacks permissions to change the directory's permissions
// even if it would be a no-op.
if ((st.st_mode & (S_IRWXU | S_IRWXG | S_IRWXO)) != 0755 && chmod(perUserDir.c_str(), 0755) == -1)
throw SysError("could not set permissions on '%s' to 755", perUserDir);
}
}

View file

@ -183,6 +183,7 @@ sources = files(
'binary-cache-store.cc',
'build-result.cc',
'build/derivation-goal.cc',
'build/derivation-creation-and-realisation-goal.cc',
'build/drv-output-substitution-goal.cc',
'build/entry-points.cc',
'build/goal.cc',
@ -196,6 +197,7 @@ sources = files(
'content-address.cc',
'daemon.cc',
'derivations.cc',
'derivation-options.cc',
'derived-path-map.cc',
'derived-path.cc',
'downstream-placeholder.cc',
@ -255,6 +257,7 @@ headers = [config_h] + files(
'binary-cache-store.hh',
'build-result.hh',
'build/derivation-goal.hh',
'build/derivation-creation-and-realisation-goal.hh',
'build/drv-output-substitution-goal.hh',
'build/goal.hh',
'build/substitution-goal.hh',
@ -267,6 +270,7 @@ headers = [config_h] + files(
'content-address.hh',
'daemon.hh',
'derivations.hh',
'derivation-options.hh',
'derived-path-map.hh',
'derived-path.hh',
'downstream-placeholder.hh',

View file

@ -2,6 +2,7 @@
#include "derivations.hh"
#include "parsed-derivations.hh"
#include "derivation-options.hh"
#include "globals.hh"
#include "store-api.hh"
#include "thread-pool.hh"
@ -222,8 +223,9 @@ void Store::queryMissing(const std::vector<DerivedPath> & targets,
auto drv = make_ref<Derivation>(derivationFromPath(drvPath));
ParsedDerivation parsedDrv(StorePath(drvPath), *drv);
DerivationOptions drvOptions = DerivationOptions::fromParsedDerivation(parsedDrv);
if (!knownOutputPaths && settings.useSubstitutes && parsedDrv.substitutesAllowed()) {
if (!knownOutputPaths && settings.useSubstitutes && drvOptions.substitutesAllowed()) {
experimentalFeatureSettings.require(Xp::CaDerivations);
// If there are unknown output paths, attempt to find if the
@ -253,7 +255,7 @@ void Store::queryMissing(const std::vector<DerivedPath> & targets,
}
}
if (knownOutputPaths && settings.useSubstitutes && parsedDrv.substitutesAllowed()) {
if (knownOutputPaths && settings.useSubstitutes && drvOptions.substitutesAllowed()) {
auto drvState = make_ref<Sync<DrvState>>(DrvState(invalid.size()));
for (auto & output : invalid)
pool.enqueue(std::bind(checkOutput, drvPath, drv, output, drvState));

View file

@ -69,14 +69,6 @@ mkMesonLibrary (finalAttrs: {
nlohmann_json
];
preConfigure =
# "Inline" .version so it's not a symlink, and includes the suffix.
# Do the meson utils, without modification.
''
chmod u+w ./.version
echo ${version} > ../../.version
'';
mesonFlags =
[
(lib.mesonEnable "seccomp-sandboxing" stdenv.hostPlatform.isLinux)

View file

@ -87,47 +87,12 @@ std::optional<Strings> ParsedDerivation::getStringsAttr(const std::string & name
}
}
StringSet ParsedDerivation::getRequiredSystemFeatures() const
std::optional<StringSet> ParsedDerivation::getStringSetAttr(const std::string & name) const
{
// FIXME: cache this?
StringSet res;
for (auto & i : getStringsAttr("requiredSystemFeatures").value_or(Strings()))
res.insert(i);
if (!drv.type().hasKnownOutputPaths())
res.insert("ca-derivations");
return res;
}
bool ParsedDerivation::canBuildLocally(Store & localStore) const
{
if (drv.platform != settings.thisSystem.get()
&& !settings.extraPlatforms.get().count(drv.platform)
&& !drv.isBuiltin())
return false;
if (settings.maxBuildJobs.get() == 0
&& !drv.isBuiltin())
return false;
for (auto & feature : getRequiredSystemFeatures())
if (!localStore.systemFeatures.get().count(feature)) return false;
return true;
}
bool ParsedDerivation::willBuildLocally(Store & localStore) const
{
return getBoolAttr("preferLocalBuild") && canBuildLocally(localStore);
}
bool ParsedDerivation::substitutesAllowed() const
{
return settings.alwaysAllowSubstitutes ? true : getBoolAttr("allowSubstitutes", true);
}
bool ParsedDerivation::useUidRange() const
{
return getRequiredSystemFeatures().count("uid-range");
auto ss = getStringsAttr(name);
return ss
? (std::optional{StringSet{ss->begin(), ss->end()}})
: (std::optional<StringSet>{});
}
static std::regex shVarName("[A-Za-z_][A-Za-z0-9_]*");
@ -188,7 +153,6 @@ static nlohmann::json pathInfoToJSON(
std::optional<nlohmann::json> ParsedDerivation::prepareStructuredAttrs(Store & store, const StorePathSet & inputPaths)
{
auto structuredAttrs = getStructuredAttrs();
if (!structuredAttrs) return std::nullopt;
auto json = *structuredAttrs;

View file

@ -8,38 +8,40 @@
namespace nix {
struct DerivationOptions;
class ParsedDerivation
{
StorePath drvPath;
BasicDerivation & drv;
std::unique_ptr<nlohmann::json> structuredAttrs;
public:
ParsedDerivation(const StorePath & drvPath, BasicDerivation & drv);
~ParsedDerivation();
const nlohmann::json * getStructuredAttrs() const
{
return structuredAttrs.get();
}
std::optional<std::string> getStringAttr(const std::string & name) const;
bool getBoolAttr(const std::string & name, bool def = false) const;
std::optional<Strings> getStringsAttr(const std::string & name) const;
StringSet getRequiredSystemFeatures() const;
std::optional<StringSet> getStringSetAttr(const std::string & name) const;
bool canBuildLocally(Store & localStore) const;
/**
* Only `DerivationOptions` is allowed to parse individual fields
* from `ParsedDerivation`. This ensures that it includes all
* derivation options, and that the likes of `LocalDerivationGoal`
* cannot introduce ad-hoc options of their own.
*/
friend struct DerivationOptions;
bool willBuildLocally(Store & localStore) const;
public:
bool substitutesAllowed() const;
ParsedDerivation(const StorePath & drvPath, BasicDerivation & drv);
bool useUidRange() const;
~ParsedDerivation();
bool hasStructuredAttrs() const
{
return static_cast<bool>(structuredAttrs);
}
std::optional<nlohmann::json> prepareStructuredAttrs(Store & store, const StorePathSet & inputPaths);
};

View file

@ -240,4 +240,19 @@ Path SSHMaster::startMaster()
#endif
void SSHMaster::Connection::trySetBufferSize(size_t size)
{
#ifdef F_SETPIPE_SZ
/* The `F_SETPIPE_SZ` `fcntl` takes a positive `int`, so check and
   convert accordingly.
   The function itself still takes a `size_t` because that is more
   portable for a platform-agnostic interface. */
assert(size <= INT_MAX);
int pipesize = size;
fcntl(in.get(), F_SETPIPE_SZ, pipesize);
fcntl(out.get(), F_SETPIPE_SZ, pipesize);
#endif
}
}

View file

@ -54,6 +54,18 @@ public:
Pid sshPid;
#endif
AutoCloseFD out, in;
/**
* Try to set the buffer size in both directions to the
* designated amount, if possible. If not possible, does
* nothing.
*
* Current implementation is to use `fcntl` with `F_SETPIPE_SZ`,
* which is Linux-only. For this implementation, `size` must be
* convertible to an `int`. In other words, it must be within
* `[0, INT_MAX]`.
*/
void trySetBufferSize(size_t size);
};
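
For illustration, this is roughly how `LegacySSHStore::openConnection()` above uses it; the 1 MiB value is just an example, not a recommendation:

```cpp
auto conn = master.startCommand(std::move(command));

// Optionally enlarge the pipe buffers; on platforms without F_SETPIPE_SZ
// (i.e. anything other than Linux) this is a silent no-op.
std::optional<size_t> connPipeSize = 1024 * 1024; // example value
if (connPipeSize)
    conn->trySetBufferSize(*connPipeSize);
```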
/**

View file

@ -230,18 +230,22 @@ void Store::addMultipleToStore(
{
std::atomic<size_t> nrDone{0};
std::atomic<size_t> nrFailed{0};
std::atomic<uint64_t> bytesExpected{0};
std::atomic<uint64_t> nrRunning{0};
using PathWithInfo = std::pair<ValidPathInfo, std::unique_ptr<Source>>;
uint64_t bytesExpected = 0;
std::map<StorePath, PathWithInfo *> infosMap;
StorePathSet storePathsToAdd;
for (auto & thingToAdd : pathsToCopy) {
bytesExpected += thingToAdd.first.narSize;
infosMap.insert_or_assign(thingToAdd.first.path, &thingToAdd);
storePathsToAdd.insert(thingToAdd.first.path);
}
act.setExpected(actCopyPath, bytesExpected);
auto showProgress = [&, nrTotal = pathsToCopy.size()]() {
act.progress(nrDone, nrTotal, nrRunning, nrFailed);
};
@ -259,9 +263,6 @@ void Store::addMultipleToStore(
return StorePathSet();
}
bytesExpected += info.narSize;
act.setExpected(actCopyPath, bytesExpected);
return info.references;
},

View file

@ -184,10 +184,6 @@ void LocalDerivationGoal::killSandbox(bool getStats)
Goal::Co LocalDerivationGoal::tryLocalBuild()
{
#if __APPLE__
additionalSandboxProfile = parsedDrv->getStringAttr("__sandboxProfile").value_or("");
#endif
unsigned int curBuilds = worker.getNrLocalBuilds();
if (curBuilds >= settings.maxBuildJobs) {
worker.waitForBuildSlot(shared_from_this());
@ -200,13 +196,12 @@ Goal::Co LocalDerivationGoal::tryLocalBuild()
/* Are we doing a chroot build? */
{
auto noChroot = parsedDrv->getBoolAttr("__noChroot");
if (settings.sandboxMode == smEnabled) {
if (noChroot)
if (drvOptions->noChroot)
throw Error("derivation '%s' has '__noChroot' set, "
"but that's not allowed when 'sandbox' is 'true'", worker.store.printStorePath(drvPath));
#if __APPLE__
if (additionalSandboxProfile != "")
if (drvOptions->additionalSandboxProfile != "")
throw Error("derivation '%s' specifies a sandbox profile, "
"but this is only allowed when 'sandbox' is 'relaxed'", worker.store.printStorePath(drvPath));
#endif
@ -215,7 +210,7 @@ Goal::Co LocalDerivationGoal::tryLocalBuild()
else if (settings.sandboxMode == smDisabled)
useChroot = false;
else if (settings.sandboxMode == smRelaxed)
useChroot = derivationType->isSandboxed() && !noChroot;
useChroot = derivationType->isSandboxed() && !drvOptions->noChroot;
}
auto & localStore = getLocalStore();
@ -240,7 +235,7 @@ Goal::Co LocalDerivationGoal::tryLocalBuild()
if (useBuildUsers()) {
if (!buildUser)
buildUser = acquireUserLock(parsedDrv->useUidRange() ? 65536 : 1, useChroot);
buildUser = acquireUserLock(drvOptions->useUidRange(*drv) ? 65536 : 1, useChroot);
if (!buildUser) {
if (!actLock)
@ -531,13 +526,19 @@ void LocalDerivationGoal::startBuilder()
killSandbox(false);
/* Right platform? */
if (!parsedDrv->canBuildLocally(worker.store))
throw Error("a '%s' with features {%s} is required to build '%s', but I am a '%s' with features {%s}",
drv->platform,
concatStringsSep(", ", parsedDrv->getRequiredSystemFeatures()),
worker.store.printStorePath(drvPath),
settings.thisSystem,
concatStringsSep<StringSet>(", ", worker.store.systemFeatures));
if (!drvOptions->canBuildLocally(worker.store, *drv)) {
// since aarch64-darwin has Rosetta 2, this user can actually run x86_64-darwin binaries on their hardware - we should tell them to run the command to install Rosetta 2
if (drv->platform == "x86_64-darwin" && settings.thisSystem == "aarch64-darwin") {
throw Error("run `/usr/sbin/softwareupdate --install-rosetta` to enable your %s to run programs for %s", settings.thisSystem, drv->platform);
} else {
throw Error("a '%s' with features {%s} is required to build '%s', but I am a '%s' with features {%s}",
drv->platform,
concatStringsSep(", ", drvOptions->getRequiredSystemFeatures(*drv)),
worker.store.printStorePath(drvPath),
settings.thisSystem,
concatStringsSep<StringSet>(", ", worker.store.systemFeatures));
}
}
/* Create a temporary directory where the build will take
place. */
@ -622,7 +623,7 @@ void LocalDerivationGoal::startBuilder()
writeStructuredAttrs();
/* Handle exportReferencesGraph(), if set. */
if (!parsedDrv->getStructuredAttrs()) {
if (!parsedDrv->hasStructuredAttrs()) {
/* The `exportReferencesGraph' feature allows the references graph
to be passed to a builder. This attribute should be a list of
pairs [name1 path1 name2 path2 ...]. The references graph of
@ -696,7 +697,7 @@ void LocalDerivationGoal::startBuilder()
PathSet allowedPaths = settings.allowedImpureHostPrefixes;
/* This works like the above, except on a per-derivation level */
auto impurePaths = parsedDrv->getStringsAttr("__impureHostDeps").value_or(Strings());
auto impurePaths = drvOptions->impureHostDeps;
for (auto & i : impurePaths) {
bool found = false;
@ -716,7 +717,7 @@ void LocalDerivationGoal::startBuilder()
throw Error("derivation '%s' requested impure path '%s', but it was not in allowed-impure-host-deps",
worker.store.printStorePath(drvPath), i);
/* Allow files in __impureHostDeps to be missing; e.g.
/* Allow files in drvOptions->impureHostDeps to be missing; e.g.
macOS 11+ has no /usr/lib/libSystem*.dylib */
pathsInChroot[i] = {i, true};
}
@ -756,10 +757,10 @@ void LocalDerivationGoal::startBuilder()
nobody account. The latter is kind of a hack to support
Samba-in-QEMU. */
createDirs(chrootRootDir + "/etc");
if (parsedDrv->useUidRange())
if (drvOptions->useUidRange(*drv))
chownToBuilder(chrootRootDir + "/etc");
if (parsedDrv->useUidRange() && (!buildUser || buildUser->getUIDCount() < 65536))
if (drvOptions->useUidRange(*drv) && (!buildUser || buildUser->getUIDCount() < 65536))
throw Error("feature 'uid-range' requires the setting '%s' to be enabled", settings.autoAllocateUids.name);
/* Declare the build user's group so that programs get a consistent
@ -818,7 +819,7 @@ void LocalDerivationGoal::startBuilder()
}
#else
if (parsedDrv->useUidRange())
if (drvOptions->useUidRange(*drv))
throw Error("feature 'uid-range' is not supported on this platform");
#if __APPLE__
/* We don't really have any parent prep work to do (yet?)
@ -828,7 +829,7 @@ void LocalDerivationGoal::startBuilder()
#endif
#endif
} else {
if (parsedDrv->useUidRange())
if (drvOptions->useUidRange(*drv))
throw Error("feature 'uid-range' is only supported in sandboxed builds");
}
@ -873,7 +874,7 @@ void LocalDerivationGoal::startBuilder()
/* Fire up a Nix daemon to process recursive Nix calls from the
builder. */
if (parsedDrv->getRequiredSystemFeatures().count("recursive-nix"))
if (drvOptions->getRequiredSystemFeatures(*drv).count("recursive-nix"))
startDaemon();
/* Run the builder. */
@ -1141,18 +1142,12 @@ void LocalDerivationGoal::initTmpDir()
tmpDirInSandbox = tmpDir;
#endif
/* In non-structured mode, add all bindings specified in the
derivation via the environment, except those listed in the
passAsFile attribute. Those are passed as file names pointing
to temporary files containing the contents. Note that
passAsFile is ignored in structure mode because it's not
needed (attributes are not passed through the environment, so
there is no size constraint). */
if (!parsedDrv->getStructuredAttrs()) {
StringSet passAsFile = tokenizeString<StringSet>(getOr(drv->env, "passAsFile", ""));
/* In non-structured mode, set all bindings either directly in the
environment or via a file, as specified by
`DerivationOptions::passAsFile`. */
if (!parsedDrv->hasStructuredAttrs()) {
for (auto & i : drv->env) {
if (passAsFile.find(i.first) == passAsFile.end()) {
if (drvOptions->passAsFile.find(i.first) == drvOptions->passAsFile.end()) {
env[i.first] = i.second;
} else {
auto hash = hashString(HashAlgorithm::SHA256, i.first);
@ -1229,7 +1224,7 @@ void LocalDerivationGoal::initEnv()
if (!impureEnv.empty())
experimentalFeatureSettings.require(Xp::ConfigurableImpureEnv);
for (auto & i : parsedDrv->getStringsAttr("impureEnvVars").value_or(Strings())) {
for (auto & i : drvOptions->impureEnvVars){
auto envVar = impureEnv.find(i);
if (envVar != impureEnv.end()) {
env[i] = envVar->second;
@ -1989,7 +1984,7 @@ void LocalDerivationGoal::runChild()
}
/* Make /etc unwritable */
if (!parsedDrv->useUidRange())
if (!drvOptions->useUidRange(*drv))
chmod_(chrootRootDir + "/etc", 0555);
/* Unshare this mount namespace. This is necessary because
@ -2176,7 +2171,7 @@ void LocalDerivationGoal::runChild()
}
sandboxProfile += ")\n";
sandboxProfile += additionalSandboxProfile;
sandboxProfile += drvOptions->additionalSandboxProfile;
} else
sandboxProfile +=
#include "sandbox-minimal.sb"
@ -2185,8 +2180,6 @@ void LocalDerivationGoal::runChild()
debug("Generated sandbox profile:");
debug(sandboxProfile);
bool allowLocalNetworking = parsedDrv->getBoolAttr("__darwinAllowLocalNetworking");
/* The tmpDir in scope points at the temporary build directory for our derivation. Some packages try different mechanisms
to find temporary directories, so we want to open up a broader place for them to put their files, if needed. */
Path globalTmpDir = canonPath(defaultTempDir(), true);
@ -2199,7 +2192,7 @@ void LocalDerivationGoal::runChild()
Strings sandboxArgs;
sandboxArgs.push_back("_GLOBAL_TMP_DIR");
sandboxArgs.push_back(globalTmpDir);
if (allowLocalNetworking) {
if (drvOptions->allowLocalNetworking) {
sandboxArgs.push_back("_ALLOW_LOCAL_NETWORKING");
sandboxArgs.push_back("1");
}
@ -2219,7 +2212,7 @@ void LocalDerivationGoal::runChild()
/* Execute the program. This should not return. */
if (drv->isBuiltin()) {
try {
logger = makeJSONLogger(*logger);
logger = makeJSONLogger(getStandardError());
std::map<std::string, Path> outputs;
for (auto & e : drv->outputs)
@ -2389,14 +2382,8 @@ SingleDrvOutputs LocalDerivationGoal::registerOutputs()
inodesSeen);
bool discardReferences = false;
if (auto structuredAttrs = parsedDrv->getStructuredAttrs()) {
if (auto udr = get(*structuredAttrs, "unsafeDiscardReferences")) {
if (auto output = get(*udr, outputName)) {
if (!output->is_boolean())
throw Error("attribute 'unsafeDiscardReferences.\"%s\"' of derivation '%s' must be a Boolean", outputName, drvPath.to_string());
discardReferences = output->get<bool>();
}
}
if (auto udr = get(drvOptions->unsafeDiscardReferences, outputName)) {
discardReferences = *udr;
}
StorePathSet references;
@ -2867,13 +2854,6 @@ void LocalDerivationGoal::checkOutputs(const std::map<std::string, ValidPathInfo
auto & outputName = output.first;
auto & info = output.second;
struct Checks
{
bool ignoreSelfRefs = false;
std::optional<uint64_t> maxSize, maxClosureSize;
std::optional<Strings> allowedReferences, allowedRequisites, disallowedReferences, disallowedRequisites;
};
/* Compute the closure and closure size of some output. This
is slightly tricky because some of its references (namely
other outputs) may not be valid yet. */
@ -2905,7 +2885,7 @@ void LocalDerivationGoal::checkOutputs(const std::map<std::string, ValidPathInfo
return std::make_pair(std::move(pathsDone), closureSize);
};
auto applyChecks = [&](const Checks & checks)
auto applyChecks = [&](const DerivationOptions::OutputChecks & checks)
{
if (checks.maxSize && info.narSize > *checks.maxSize)
throw BuildError("path '%s' is too large at %d bytes; limit is %d bytes",
@ -2918,15 +2898,13 @@ void LocalDerivationGoal::checkOutputs(const std::map<std::string, ValidPathInfo
worker.store.printStorePath(info.path), closureSize, *checks.maxClosureSize);
}
auto checkRefs = [&](const std::optional<Strings> & value, bool allowed, bool recursive)
auto checkRefs = [&](const StringSet & value, bool allowed, bool recursive)
{
if (!value) return;
/* Parse a list of reference specifiers. Each element must
either be a store path, or the symbolic name of the output
of the derivation (such as `out'). */
StorePathSet spec;
for (auto & i : *value) {
for (auto & i : value) {
if (worker.store.isStorePath(i))
spec.insert(worker.store.parseStorePath(i));
else if (auto output = get(outputs, i))
@ -2964,73 +2942,35 @@ void LocalDerivationGoal::checkOutputs(const std::map<std::string, ValidPathInfo
}
};
checkRefs(checks.allowedReferences, true, false);
checkRefs(checks.allowedRequisites, true, true);
checkRefs(checks.disallowedReferences, false, false);
checkRefs(checks.disallowedRequisites, false, true);
/* Mandatory check: an absent whitelist and a present but empty
whitelist mean very different things. */
if (auto & refs = checks.allowedReferences) {
checkRefs(*refs, true, false);
}
if (auto & refs = checks.allowedRequisites) {
checkRefs(*refs, true, true);
}
/* Optimization: nothing needs to be done when the
disallowed set is empty. */
if (!checks.disallowedReferences.empty()) {
checkRefs(checks.disallowedReferences, false, false);
}
if (!checks.disallowedRequisites.empty()) {
checkRefs(checks.disallowedRequisites, false, true);
}
};
if (auto structuredAttrs = parsedDrv->getStructuredAttrs()) {
if (get(*structuredAttrs, "allowedReferences")){
warn("'structuredAttrs' disables the effect of the top-level attribute 'allowedReferences'; use 'outputChecks' instead");
}
if (get(*structuredAttrs, "allowedRequisites")){
warn("'structuredAttrs' disables the effect of the top-level attribute 'allowedRequisites'; use 'outputChecks' instead");
}
if (get(*structuredAttrs, "disallowedRequisites")){
warn("'structuredAttrs' disables the effect of the top-level attribute 'disallowedRequisites'; use 'outputChecks' instead");
}
if (get(*structuredAttrs, "disallowedReferences")){
warn("'structuredAttrs' disables the effect of the top-level attribute 'disallowedReferences'; use 'outputChecks' instead");
}
if (get(*structuredAttrs, "maxSize")){
warn("'structuredAttrs' disables the effect of the top-level attribute 'maxSize'; use 'outputChecks' instead");
}
if (get(*structuredAttrs, "maxClosureSize")){
warn("'structuredAttrs' disables the effect of the top-level attribute 'maxClosureSize'; use 'outputChecks' instead");
}
if (auto outputChecks = get(*structuredAttrs, "outputChecks")) {
if (auto output = get(*outputChecks, outputName)) {
Checks checks;
std::visit(overloaded{
[&](const DerivationOptions::OutputChecks & checks) {
applyChecks(checks);
},
[&](const std::map<std::string, DerivationOptions::OutputChecks> & checksPerOutput) {
if (auto outputChecks = get(checksPerOutput, outputName))
if (auto maxSize = get(*output, "maxSize"))
checks.maxSize = maxSize->get<uint64_t>();
if (auto maxClosureSize = get(*output, "maxClosureSize"))
checks.maxClosureSize = maxClosureSize->get<uint64_t>();
auto get_ = [&](const std::string & name) -> std::optional<Strings> {
if (auto i = get(*output, name)) {
Strings res;
for (auto j = i->begin(); j != i->end(); ++j) {
if (!j->is_string())
throw Error("attribute '%s' of derivation '%s' must be a list of strings", name, worker.store.printStorePath(drvPath));
res.push_back(j->get<std::string>());
}
checks.disallowedRequisites = res;
return res;
}
return {};
};
checks.allowedReferences = get_("allowedReferences");
checks.allowedRequisites = get_("allowedRequisites");
checks.disallowedReferences = get_("disallowedReferences");
checks.disallowedRequisites = get_("disallowedRequisites");
applyChecks(checks);
}
}
} else {
// legacy non-structured-attributes case
Checks checks;
checks.ignoreSelfRefs = true;
checks.allowedReferences = parsedDrv->getStringsAttr("allowedReferences");
checks.allowedRequisites = parsedDrv->getStringsAttr("allowedRequisites");
checks.disallowedReferences = parsedDrv->getStringsAttr("disallowedReferences");
checks.disallowedRequisites = parsedDrv->getStringsAttr("disallowedRequisites");
applyChecks(checks);
}
applyChecks(*outputChecks);
},
}, drvOptions->outputChecks);
}
}

View file

@ -109,11 +109,6 @@ struct LocalDerivationGoal : public DerivationGoal
typedef map<std::string, std::string> Environment;
Environment env;
#if __APPLE__
typedef std::string SandboxProfile;
SandboxProfile additionalSandboxProfile;
#endif
/**
* Hash rewriting.
*/

View file

@ -34,14 +34,6 @@ mkMesonLibrary (finalAttrs: {
nix-util
];
preConfigure =
# "Inline" .version so it's not a symlink, and includes the suffix.
# Do the meson utils, without modification.
''
chmod u+w ./.version
echo ${version} > ../../.version
'';
mesonFlags = [
];

View file

@ -38,14 +38,6 @@ mkMesonLibrary (finalAttrs: {
rapidcheck
];
preConfigure =
# "Inline" .version so it's not a symlink, and includes the suffix.
# Do the meson utils, without modification.
''
chmod u+w ./.version
echo ${version} > ../../.version
'';
mesonFlags = [
];

View file

@ -45,14 +45,6 @@ mkMesonExecutable (finalAttrs: {
gtest
];
preConfigure =
# "Inline" .version so it's not a symlink, and includes the suffix.
# Do the meson utils, without modification.
''
chmod u+w ./.version
echo ${version} > ../../.version
'';
mesonFlags = [
];

View file

@ -125,6 +125,8 @@ constexpr std::array<ExperimentalFeatureDetails, numXpFeatures> xpFeatureDetails
runCommand "foo"
{
# Optional: let Nix know "foo" requires the experimental feature
requiredSystemFeatures = [ "recursive-nix" ];
buildInputs = [ nix jq ];
NIX_PATH = "nixpkgs=${<nixpkgs>}";
}

View file

@ -1,6 +1,3 @@
#include "file-system.hh"
#include "signals.hh"
#include "finally.hh"
#include "serialise.hh"
#include "util.hh"

View file

@ -3,6 +3,7 @@
#include "types.hh"
#include <nlohmann/json_fwd.hpp>
#include <iostream>
#include <optional>
namespace nix {
@ -38,6 +39,15 @@ std::optional<nlohmann::json> optionalValueAt(const nlohmann::json::object_t & m
return std::optional { map.at(key) };
}
std::optional<nlohmann::json> nullableValueAt(const nlohmann::json::object_t & map, const std::string & key)
{
auto value = valueAt(map, key);
if (value.is_null())
return std::nullopt;
return std::optional { std::move(value) };
}
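
A tiny sketch of the difference between the two helpers; the JSON literal is, of course, just an example:

```cpp
#include <nlohmann/json.hpp>
#include "json-utils.hh"

using namespace nix;

void jsonHelpersExample()
{
    nlohmann::json j = nlohmann::json::parse(R"({"a": null, "b": 1})");
    auto & obj = j.get_ref<nlohmann::json::object_t &>();

    // Key present but null: nullableValueAt() maps the null to std::nullopt...
    auto a = nullableValueAt(obj, "a"); // -> std::nullopt
    // ...while a present, non-null value comes through as usual.
    auto b = nullableValueAt(obj, "b"); // -> contains the JSON value 1

    // optionalValueAt() is the one that tolerates a *missing* key;
    // nullableValueAt() goes through valueAt() and would throw here.
    auto c = optionalValueAt(obj, "c"); // -> std::nullopt
}
```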
const nlohmann::json * getNullable(const nlohmann::json & value)
{

View file

@ -25,6 +25,7 @@ const nlohmann::json & valueAt(
const std::string & key);
std::optional<nlohmann::json> optionalValueAt(const nlohmann::json::object_t & value, const std::string & key);
std::optional<nlohmann::json> nullableValueAt(const nlohmann::json::object_t & value, const std::string & key);
/**
* Downcast the json object, failing with a nice error if the conversion fails.
@ -69,6 +70,9 @@ struct json_avoids_null<std::vector<T>> : std::true_type {};
template<typename T>
struct json_avoids_null<std::list<T>> : std::true_type {};
template<typename T>
struct json_avoids_null<std::set<T>> : std::true_type {};
template<typename K, typename V>
struct json_avoids_null<std::map<K, V>> : std::true_type {};

View file

@ -29,7 +29,7 @@ void setCurActivity(const ActivityId activityId)
curActivity = activityId;
}
Logger * logger = makeSimpleLogger(true);
std::unique_ptr<Logger> logger = makeSimpleLogger(true);
void Logger::warn(const std::string & msg)
{
@ -128,9 +128,9 @@ void writeToStderr(std::string_view s)
}
}
Logger * makeSimpleLogger(bool printBuildLogs)
std::unique_ptr<Logger> makeSimpleLogger(bool printBuildLogs)
{
return new SimpleLogger(printBuildLogs);
return std::make_unique<SimpleLogger>(printBuildLogs);
}
std::atomic<uint64_t> nextId{0};
@ -167,9 +167,9 @@ void to_json(nlohmann::json & json, std::shared_ptr<Pos> pos)
}
struct JSONLogger : Logger {
Logger & prevLogger;
Descriptor fd;
JSONLogger(Logger & prevLogger) : prevLogger(prevLogger) { }
JSONLogger(Descriptor fd) : fd(fd) { }
bool isVerbose() override {
return true;
@ -190,7 +190,7 @@ struct JSONLogger : Logger {
void write(const nlohmann::json & json)
{
prevLogger.log(lvlError, "@nix " + json.dump(-1, ' ', false, nlohmann::json::error_handler_t::replace));
writeLine(fd, "@nix " + json.dump(-1, ' ', false, nlohmann::json::error_handler_t::replace));
}
void log(Verbosity lvl, std::string_view s) override
@ -262,9 +262,9 @@ struct JSONLogger : Logger {
}
};
Logger * makeJSONLogger(Logger & prevLogger)
std::unique_ptr<Logger> makeJSONLogger(Descriptor fd)
{
return new JSONLogger(prevLogger);
return std::make_unique<JSONLogger>(fd);
}
static Logger::Fields getFields(nlohmann::json & json)

View file

@ -3,6 +3,7 @@
#include "error.hh"
#include "config.hh"
#include "file-descriptor.hh"
#include <nlohmann/json_fwd.hpp>
@ -179,11 +180,11 @@ struct PushActivity
~PushActivity() { setCurActivity(prevAct); }
};
extern Logger * logger;
extern std::unique_ptr<Logger> logger;
Logger * makeSimpleLogger(bool printBuildLogs = true);
std::unique_ptr<Logger> makeSimpleLogger(bool printBuildLogs = true);
Logger * makeJSONLogger(Logger & prevLogger);
std::unique_ptr<Logger> makeJSONLogger(Descriptor fd);
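
For illustration only: switching the global logger to JSON output now means handing over a file descriptor, as the builtin-builder path above does; the record shape sketched in the comment is indicative rather than exhaustive:

```cpp
// Emit machine-readable log records directly to stderr.
logger = makeJSONLogger(getStandardError());

// Each record is written as a single line prefixed with "@nix", roughly:
//   @nix {"action":"msg","level":3,"msg":"hello from the JSON logger"}
logger->log(lvlInfo, "hello from the JSON logger");
```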
/**
* @param source A noun phrase describing the source of the message, e.g. "the builder".

View file

@ -153,6 +153,7 @@ sources = files(
'json-utils.cc',
'logging.cc',
'memory-source-accessor.cc',
'mounted-source-accessor.cc',
'position.cc',
'posix-source-accessor.cc',
'references.cc',
@ -166,6 +167,7 @@ sources = files(
'tarfile.cc',
'terminal.cc',
'thread-pool.cc',
'union-source-accessor.cc',
'unix-domain-socket.cc',
'url.cc',
'users.cc',

View file

@ -1,4 +1,4 @@
#include "mounted-source-accessor.hh"
#include "source-accessor.hh"
namespace nix {
@ -23,12 +23,6 @@ struct MountedSourceAccessor : SourceAccessor
return accessor->readFile(subpath);
}
bool pathExists(const CanonPath & path) override
{
auto [accessor, subpath] = resolve(path);
return accessor->pathExists(subpath);
}
std::optional<Stat> maybeLstat(const CanonPath & path) override
{
auto [accessor, subpath] = resolve(path);
@ -69,6 +63,12 @@ struct MountedSourceAccessor : SourceAccessor
path.pop();
}
}
std::optional<std::filesystem::path> getPhysicalPath(const CanonPath & path) override
{
auto [accessor, subpath] = resolve(path);
return accessor->getPhysicalPath(subpath);
}
};
ref<SourceAccessor> makeMountedSourceAccessor(std::map<CanonPath, ref<SourceAccessor>> mounts)

View file

@ -54,17 +54,6 @@ mkMesonLibrary (finalAttrs: {
nlohmann_json
];
preConfigure =
# "Inline" .version so it's not a symlink, and includes the suffix.
# Do the meson utils, without modification.
#
# TODO: change release process to add `pre` in `.version`, remove it
# before tagging, and restore after.
''
chmod u+w ./.version
echo ${version} > ../../.version
'';
mesonFlags = [
(lib.mesonEnable "cpuid" stdenv.hostPlatform.isx86_64)
];

View file

@ -214,4 +214,12 @@ ref<SourceAccessor> getFSSourceAccessor();
*/
ref<SourceAccessor> makeFSSourceAccessor(std::filesystem::path root);
ref<SourceAccessor> makeMountedSourceAccessor(std::map<CanonPath, ref<SourceAccessor>> mounts);
/**
* Construct an accessor that presents a "union" view of a vector of
* underlying accessors. Earlier accessors take precedence over later.
*/
ref<SourceAccessor> makeUnionSourceAccessor(std::vector<ref<SourceAccessor>> && accessors);
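
A brief sketch combining the two factory functions; `base` and `overlay` stand in for any existing accessors:

```cpp
// Assume `base` and `overlay` are existing ref<SourceAccessor> values.

// Present `overlay` at /nix/store on top of `base`:
auto mounted = makeMountedSourceAccessor({
    {CanonPath::root, base},
    {CanonPath("/nix/store"), overlay},
});

// Layer the two: lookups consult `overlay` first, then fall back to `base`.
auto unioned = makeUnionSourceAccessor({overlay, base});

// Both behave like any other SourceAccessor:
if (unioned->pathExists(CanonPath("/default.nix"))) {
    auto contents = unioned->readFile(CanonPath("/default.nix"));
    // ...
}
```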
}

View file

@ -0,0 +1,82 @@
#include "source-accessor.hh"
namespace nix {
struct UnionSourceAccessor : SourceAccessor
{
std::vector<ref<SourceAccessor>> accessors;
UnionSourceAccessor(std::vector<ref<SourceAccessor>> _accessors)
: accessors(std::move(_accessors))
{
displayPrefix.clear();
}
std::string readFile(const CanonPath & path) override
{
for (auto & accessor : accessors) {
auto st = accessor->maybeLstat(path);
if (st)
return accessor->readFile(path);
}
throw FileNotFound("path '%s' does not exist", showPath(path));
}
std::optional<Stat> maybeLstat(const CanonPath & path) override
{
for (auto & accessor : accessors) {
auto st = accessor->maybeLstat(path);
if (st)
return st;
}
return std::nullopt;
}
DirEntries readDirectory(const CanonPath & path) override
{
DirEntries result;
for (auto & accessor : accessors) {
auto st = accessor->maybeLstat(path);
if (!st)
continue;
for (auto & entry : accessor->readDirectory(path))
// Don't override entries from previous accessors.
result.insert(entry);
}
return result;
}
std::string readLink(const CanonPath & path) override
{
for (auto & accessor : accessors) {
auto st = accessor->maybeLstat(path);
if (st)
return accessor->readLink(path);
}
throw FileNotFound("path '%s' does not exist", showPath(path));
}
std::string showPath(const CanonPath & path) override
{
for (auto & accessor : accessors)
return accessor->showPath(path);
return SourceAccessor::showPath(path);
}
std::optional<std::filesystem::path> getPhysicalPath(const CanonPath & path) override
{
for (auto & accessor : accessors) {
auto p = accessor->getPhysicalPath(path);
if (p)
return p;
}
return std::nullopt;
}
};
ref<SourceAccessor> makeUnionSourceAccessor(std::vector<ref<SourceAccessor>> && accessors)
{
return make_ref<UnionSourceAccessor>(std::move(accessors));
}
}

View file

@ -200,8 +200,15 @@ static int childEntry(void * arg)
pid_t startProcess(std::function<void()> fun, const ProcessOptions & options)
{
ChildWrapperFunction wrapper = [&] {
if (!options.allowVfork)
if (!options.allowVfork) {
/* Set a simple logger, while releasing (not destroying)
the parent logger. We don't want to run the parent
logger's destructor since that will crash (e.g. when
~ProgressBar() tries to join a thread that doesn't
exist). */
logger.release();
logger = makeSimpleLogger();
}
try {
#if __linux__
if (options.dieWithParent && prctl(PR_SET_PDEATHSIG, SIGKILL) == -1)

View file

@ -11,6 +11,7 @@
#include "current-process.hh"
#include "parsed-derivations.hh"
#include "derivation-options.hh"
#include "store-api.hh"
#include "local-fs-store.hh"
#include "globals.hh"
@ -27,6 +28,7 @@
#include "users.hh"
#include "network-proxy.hh"
#include "compatibility-settings.hh"
#include "man-pages.hh"
using namespace nix;
using namespace std::string_literals;
@ -542,12 +544,13 @@ static void main_nix_build(int argc, char * * argv)
env["NIX_STORE"] = store->storeDir;
env["NIX_BUILD_CORES"] = std::to_string(settings.buildCores);
auto passAsFile = tokenizeString<StringSet>(getOr(drv.env, "passAsFile", ""));
ParsedDerivation parsedDrv(packageInfo.requireDrvPath(), drv);
DerivationOptions drvOptions = DerivationOptions::fromParsedDerivation(parsedDrv);
int fileNr = 0;
for (auto & var : drv.env)
if (passAsFile.count(var.first)) {
if (drvOptions.passAsFile.count(var.first)) {
auto fn = ".attr-" + std::to_string(fileNr++);
Path p = (tmpDir.path() / fn).string();
writeFile(p, var.second);
@ -557,7 +560,7 @@ static void main_nix_build(int argc, char * * argv)
std::string structuredAttrsRC;
if (env.count("__json")) {
if (parsedDrv.hasStructuredAttrs()) {
StorePathSet inputs;
std::function<void(const StorePath &, const DerivedPathMap<StringSet>::ChildNode &)> accumInputClosure;
@ -575,8 +578,6 @@ static void main_nix_build(int argc, char * * argv)
for (const auto & [inputDrv, inputNode] : drv.inputDrvs.map)
accumInputClosure(inputDrv, inputNode);
ParsedDerivation parsedDrv(packageInfo.requireDrvPath(), drv);
if (auto structAttrs = parsedDrv.prepareStructuredAttrs(*store, inputs)) {
auto json = structAttrs.value();
structuredAttrsRC = writeStructuredAttrsShell(json);

Some files were not shown because too many files have changed in this diff.