diff --git a/doc/manual/book.toml b/doc/manual/book.toml.in similarity index 96% rename from doc/manual/book.toml rename to doc/manual/book.toml.in index 213739174..34acf642e 100644 --- a/doc/manual/book.toml +++ b/doc/manual/book.toml.in @@ -1,5 +1,5 @@ [book] -title = "Nix Reference Manual" +title = "Nix @version@ Reference Manual" src = "source" [output.html] diff --git a/doc/manual/meson.build b/doc/manual/meson.build index c4cc9b893..8796cee63 100644 --- a/doc/manual/meson.build +++ b/doc/manual/meson.build @@ -83,6 +83,7 @@ manual = custom_target( ''' @0@ @INPUT0@ @CURRENT_SOURCE_DIR@ > @DEPFILE@ @0@ @INPUT1@ summary @2@ < @CURRENT_SOURCE_DIR@/source/SUMMARY.md.in > @2@/source/SUMMARY.md + sed -e 's|@version@|@3@|g' < @INPUT2@ > @2@/book.toml rsync -r --include='*.md' @CURRENT_SOURCE_DIR@/ @2@/ (cd @2@; RUST_LOG=warn @1@ build -d @2@ 3>&2 2>&1 1>&3) | { grep -Fv "because fragment resolution isn't implemented" || :; } 3>&2 2>&1 1>&3 rm -rf @2@/manual @@ -92,12 +93,13 @@ manual = custom_target( python.full_path(), mdbook.full_path(), meson.current_build_dir(), + meson.project_version(), ), ], input : [ generate_manual_deps, 'substitute.py', - 'book.toml', + 'book.toml.in', 'anchors.jq', 'custom.css', nix3_cli_files, diff --git a/doc/manual/rl-next/git-lfs-support.md b/doc/manual/rl-next/git-lfs-support.md new file mode 100644 index 000000000..4b6e0ca86 --- /dev/null +++ b/doc/manual/rl-next/git-lfs-support.md @@ -0,0 +1,18 @@ +--- +synopsis: "Git LFS support" +prs: [10153, 12468] +--- + +The Git fetcher now supports Large File Storage (LFS). This can be enabled by passing the attribute `lfs = true` to the fetcher, e.g. +```console +nix flake prefetch 'git+ssh://git@github.com/Apress/repo-with-large-file-storage.git?lfs=1' +``` + +A flake can also declare that it requires lfs to be enabled: +``` +{ + inputs.self.lfs = true; +} +``` + +Author: [**@b-camacho**](https://github.com/b-camacho), [**@kip93**](https://github.com/kip93) diff --git a/doc/manual/source/development/building.md b/doc/manual/source/development/building.md index c5a173dc7..da465d090 100644 --- a/doc/manual/source/development/building.md +++ b/doc/manual/source/development/building.md @@ -28,7 +28,7 @@ $ nix-shell --attr devShells.x86_64-linux.native-clangStdenvPackages > **Note** > -> You can use `native-ccacheStdenvPackages` to drastically improve rebuild time. +> You can use `native-ccacheStdenv` to drastically improve rebuild time. > By default, [ccache](https://ccache.dev) keeps artifacts in `~/.cache/ccache/`. 
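(The Git LFS release note above says the fetcher takes an `lfs` attribute; as a complement to the `nix flake prefetch` example there, here is a minimal sketch of that attribute form. The repository URL and ref are placeholders, not part of this change.)

```nix
# Sketch of the attribute form described in the Git LFS release note above;
# the URL and ref are placeholders.
builtins.fetchGit {
  url = "ssh://git@example.org/org/repo-with-lfs.git";
  ref = "main";
  lfs = true; # smudge Git LFS pointer files while fetching
}
```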
To build Nix itself in this shell: diff --git a/flake.lock b/flake.lock index e45f0fb98..ce484a67a 100644 --- a/flake.lock +++ b/flake.lock @@ -36,24 +36,6 @@ "type": "github" } }, - "flake-utils": { - "inputs": { - "systems": "systems" - }, - "locked": { - "lastModified": 1710146030, - "narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=", - "owner": "numtide", - "repo": "flake-utils", - "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a", - "type": "github" - }, - "original": { - "owner": "numtide", - "repo": "flake-utils", - "type": "github" - } - }, "git-hooks-nix": { "inputs": { "flake-compat": [], @@ -79,24 +61,6 @@ "type": "github" } }, - "nixfmt": { - "inputs": { - "flake-utils": "flake-utils" - }, - "locked": { - "lastModified": 1736283758, - "narHash": "sha256-hrKhUp2V2fk/dvzTTHFqvtOg000G1e+jyIam+D4XqhA=", - "owner": "NixOS", - "repo": "nixfmt", - "rev": "8d4bd690c247004d90d8554f0b746b1231fe2436", - "type": "github" - }, - "original": { - "owner": "NixOS", - "repo": "nixfmt", - "type": "github" - } - }, "nixpkgs": { "locked": { "lastModified": 1734359947, @@ -150,26 +114,10 @@ "flake-compat": "flake-compat", "flake-parts": "flake-parts", "git-hooks-nix": "git-hooks-nix", - "nixfmt": "nixfmt", "nixpkgs": "nixpkgs", "nixpkgs-23-11": "nixpkgs-23-11", "nixpkgs-regression": "nixpkgs-regression" } - }, - "systems": { - "locked": { - "lastModified": 1681028828, - "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", - "owner": "nix-systems", - "repo": "default", - "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", - "type": "github" - }, - "original": { - "owner": "nix-systems", - "repo": "default", - "type": "github" - } } }, "root": "root", diff --git a/flake.nix b/flake.nix index 2f70dffbe..895a081f2 100644 --- a/flake.nix +++ b/flake.nix @@ -20,7 +20,6 @@ # work around 7730 and https://github.com/NixOS/nix/issues/7807 inputs.git-hooks-nix.inputs.flake-compat.follows = ""; inputs.git-hooks-nix.inputs.gitignore.follows = ""; - inputs.nixfmt.url = "github:NixOS/nixfmt"; outputs = inputs@{ @@ -155,6 +154,7 @@ f = import ./packaging/components.nix { inherit (final) lib; inherit officialRelease; + pkgs = final; src = self; }; }; @@ -224,6 +224,30 @@ LANG=C.UTF-8 ${pkgs.changelog-d}/bin/changelog-d ${./doc/manual/rl-next} >$out ''; repl-completion = nixpkgsFor.${system}.native.callPackage ./tests/repl-completion.nix { }; + + /** + Checks for our packaging expressions. + This shouldn't build anything significant; just check that things + (including derivations) are _set up_ correctly. + */ + packaging-overriding = + let + pkgs = nixpkgsFor.${system}.native; + nix = self.packages.${system}.nix; + in + assert (nix.appendPatches [ pkgs.emptyFile ]).libs.nix-util.src.patches == [ pkgs.emptyFile ]; + if pkgs.stdenv.buildPlatform.isDarwin then + lib.warn "packaging-overriding check currently disabled because of a permissions issue on macOS" pkgs.emptyFile + else + # If this fails, something might be wrong with how we've wired the scope, + # or something could be broken in Nixpkgs. 
+ pkgs.testers.testEqualContents { + assertion = "trivial patch does not change source contents"; + expected = "${./.}"; + actual = + # Same for all components; nix-util is an arbitrary pick + (nix.appendPatches [ pkgs.emptyFile ]).libs.nix-util.src; + }; } // (lib.optionalAttrs (builtins.elem system linux64BitSystems)) { dockerImage = self.hydraJobs.dockerImage.${system}; @@ -378,7 +402,7 @@ devShells = let - makeShell = import ./packaging/dev-shell.nix { inherit inputs lib devFlake; }; + makeShell = import ./packaging/dev-shell.nix { inherit lib devFlake; }; prefixAttrs = prefix: lib.concatMapAttrs (k: v: { "${prefix}-${k}" = v; }); in forAllSystems ( diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 208296194..4d504b8ee 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -39,7 +39,6 @@ }; nixfmt-rfc-style = { enable = true; - package = inputs.nixfmt.packages.${pkgs.hostPlatform.system}.default; excludes = [ # Invalid ''^tests/functional/lang/parse-.*\.nix$'' diff --git a/packaging/components.nix b/packaging/components.nix index 07bb209cd..9da864887 100644 --- a/packaging/components.nix +++ b/packaging/components.nix @@ -1,5 +1,6 @@ { lib, + pkgs, src, officialRelease, }: @@ -7,7 +8,23 @@ scope: let - inherit (scope) callPackage; + inherit (scope) + callPackage + ; + inherit + (scope.callPackage ( + { stdenv }: + { + inherit stdenv; + } + ) { }) + stdenv + ; + inherit (pkgs.buildPackages) + meson + ninja + pkg-config + ; baseVersion = lib.fileContents ../.version; @@ -20,6 +37,165 @@ let }_${src.shortRev or "dirty"}"; fineVersion = baseVersion + fineVersionSuffix; + + root = ../.; + + # Indirection for Nixpkgs to override when package.nix files are vendored + filesetToSource = lib.fileset.toSource; + + /** + Given a set of layers, create a mkDerivation-like function + */ + mkPackageBuilder = + exts: userFn: stdenv.mkDerivation (lib.extends (lib.composeManyExtensions exts) userFn); + + setVersionLayer = finalAttrs: prevAttrs: { + preConfigure = + prevAttrs.prevAttrs or "" + + + # Update the repo-global .version file. + # Symlink ./.version points there, but by default only workDir is writable. + '' + chmod u+w ./.version + echo ${finalAttrs.version} > ./.version + ''; + }; + + localSourceLayer = + finalAttrs: prevAttrs: + let + workDirPath = + # Ideally we'd pick finalAttrs.workDir, but for now `mkDerivation` has + # the requirement that everything except passthru and meta must be + # serialized by mkDerivation, which doesn't work for this. + prevAttrs.workDir; + + workDirSubpath = lib.path.removePrefix root workDirPath; + sources = + assert prevAttrs.fileset._type == "fileset"; + prevAttrs.fileset; + src = lib.fileset.toSource { + fileset = sources; + inherit root; + }; + + in + { + sourceRoot = "${src.name}/" + workDirSubpath; + inherit src; + + # Clear what `derivation` can't/shouldn't serialize; see prevAttrs.workDir. + fileset = null; + workDir = null; + }; + + resolveRelPath = p: lib.path.removePrefix root p; + + makeFetchedSourceLayer = + finalScope: finalAttrs: prevAttrs: + let + workDirPath = + # Ideally we'd pick finalAttrs.workDir, but for now `mkDerivation` has + # the requirement that everything except passthru and meta must be + # serialized by mkDerivation, which doesn't work for this. 
+ prevAttrs.workDir; + + workDirSubpath = resolveRelPath workDirPath; + + in + { + sourceRoot = "${finalScope.patchedSrc.name}/" + workDirSubpath; + src = finalScope.patchedSrc; + version = + let + n = lib.length finalScope.patches; + in + if n == 0 then finalAttrs.version else finalAttrs.version + "+${toString n}"; + + # Clear what `derivation` can't/shouldn't serialize; see prevAttrs.workDir. + fileset = null; + workDir = null; + }; + + mesonLayer = finalAttrs: prevAttrs: { + # NOTE: + # As of https://github.com/NixOS/nixpkgs/blob/8baf8241cea0c7b30e0b8ae73474cb3de83c1a30/pkgs/by-name/me/meson/setup-hook.sh#L26, + # `mesonBuildType` defaults to `plain` if not specified. We want our Nix-built binaries to be optimized by default. + # More on build types here: https://mesonbuild.com/Builtin-options.html#details-for-buildtype. + mesonBuildType = "release"; + # NOTE: + # Users who are debugging Nix builds are expected to set the environment variable `mesonBuildType`, per the + # guidance in https://github.com/NixOS/nix/blob/8a3fc27f1b63a08ac983ee46435a56cf49ebaf4a/doc/manual/source/development/debugging.md?plain=1#L10. + # For this reason, we don't want to refer to `finalAttrs.mesonBuildType` here, but rather use the environment variable. + preConfigure = + prevAttrs.preConfigure or "" + + + lib.optionalString + ( + !stdenv.hostPlatform.isWindows + # build failure + && !stdenv.hostPlatform.isStatic + # LTO breaks exception handling on x86-64-darwin. + && stdenv.system != "x86_64-darwin" + ) + '' + case "$mesonBuildType" in + release|minsize) appendToVar mesonFlags "-Db_lto=true" ;; + *) appendToVar mesonFlags "-Db_lto=false" ;; + esac + ''; + nativeBuildInputs = [ + meson + ninja + ] ++ prevAttrs.nativeBuildInputs or [ ]; + mesonCheckFlags = prevAttrs.mesonCheckFlags or [ ] ++ [ + "--print-errorlogs" + ]; + }; + + mesonBuildLayer = finalAttrs: prevAttrs: { + nativeBuildInputs = prevAttrs.nativeBuildInputs or [ ] ++ [ + pkg-config + ]; + separateDebugInfo = !stdenv.hostPlatform.isStatic; + hardeningDisable = lib.optional stdenv.hostPlatform.isStatic "pie"; + env = + prevAttrs.env or { } + // lib.optionalAttrs ( + stdenv.isLinux + && !(stdenv.hostPlatform.isStatic && stdenv.system == "aarch64-linux") + && !(stdenv.hostPlatform.useLLVM or false) + ) { LDFLAGS = "-fuse-ld=gold"; }; + }; + + mesonLibraryLayer = finalAttrs: prevAttrs: { + outputs = prevAttrs.outputs or [ "out" ] ++ [ "dev" ]; + }; + + # Work around weird `--as-needed` linker behavior with BSD, see + # https://github.com/mesonbuild/meson/issues/3593 + bsdNoLinkAsNeeded = + finalAttrs: prevAttrs: + lib.optionalAttrs stdenv.hostPlatform.isBSD { + mesonFlags = [ (lib.mesonBool "b_asneeded" false) ] ++ prevAttrs.mesonFlags or [ ]; + }; + + miscGoodPractice = finalAttrs: prevAttrs: { + strictDeps = prevAttrs.strictDeps or true; + enableParallelBuilding = true; + }; + + /** + Append patches to the source layer. + */ + appendPatches = + scope: patches: + scope.overrideScope ( + finalScope: prevScope: { + patches = prevScope.patches ++ patches; + } + ); + in # This becomes the pkgs.nixComponents attribute set @@ -27,6 +203,110 @@ in version = baseVersion + versionSuffix; inherit versionSuffix; + inherit filesetToSource; + + /** + A user-provided extension function to apply to each component derivation. + */ + mesonComponentOverrides = finalAttrs: prevAttrs: { }; + + /** + An overridable derivation layer for handling the sources. 
+ */ + sourceLayer = localSourceLayer; + + /** + Resolve a path value to either itself or a path in the `src`, depending + whether `overrideSource` was called. + */ + resolvePath = p: p; + + /** + Apply an extension function (i.e. overlay-shaped) to all component derivations. + */ + overrideAllMesonComponents = + f: + scope.overrideScope ( + finalScope: prevScope: { + mesonComponentOverrides = lib.composeExtensions scope.mesonComponentOverrides f; + } + ); + + /** + Provide an alternate source. This allows the expressions to be vendored without copying the sources, + but it does make the build non-granular; all components will use a complete source. + + Packaging expressions will be ignored. + */ + overrideSource = + src: + scope.overrideScope ( + finalScope: prevScope: { + sourceLayer = makeFetchedSourceLayer finalScope; + /** + Unpatched source for the build of Nix. Packaging expressions will be ignored. + */ + src = src; + /** + Patches for the whole Nix source. Changes to packaging expressions will be ignored. + */ + patches = [ ]; + /** + Fetched and patched source to be used in component derivations. + */ + patchedSrc = + if finalScope.patches == [ ] then + src + else + pkgs.buildPackages.srcOnly ( + pkgs.buildPackages.stdenvNoCC.mkDerivation { + name = "${finalScope.src.name or "nix-source"}-patched"; + inherit (finalScope) src patches; + } + ); + resolvePath = p: finalScope.patchedSrc + "/${resolveRelPath p}"; + appendPatches = appendPatches finalScope; + } + ); + + /** + Append patches to be applied to the whole Nix source. + This affects all components. + + Changes to the packaging expressions will be ignored. + */ + appendPatches = + patches: + # switch to "fetched" source first, so that patches apply to the whole tree. + (scope.overrideSource "${./..}").appendPatches patches; + + mkMesonDerivation = mkPackageBuilder [ + miscGoodPractice + scope.sourceLayer + setVersionLayer + mesonLayer + scope.mesonComponentOverrides + ]; + mkMesonExecutable = mkPackageBuilder [ + miscGoodPractice + bsdNoLinkAsNeeded + scope.sourceLayer + setVersionLayer + mesonLayer + mesonBuildLayer + scope.mesonComponentOverrides + ]; + mkMesonLibrary = mkPackageBuilder [ + miscGoodPractice + bsdNoLinkAsNeeded + scope.sourceLayer + mesonLayer + setVersionLayer + mesonBuildLayer + mesonLibraryLayer + scope.mesonComponentOverrides + ]; + nix-util = callPackage ../src/libutil/package.nix { }; nix-util-c = callPackage ../src/libutil-c/package.nix { }; nix-util-test-support = callPackage ../src/libutil-test-support/package.nix { }; @@ -66,5 +346,33 @@ in nix-perl-bindings = callPackage ../src/perl/package.nix { }; - nix-everything = callPackage ../packaging/everything.nix { }; + nix-everything = callPackage ../packaging/everything.nix { } // { + # Note: no `passthru.overrideAllMesonComponents` + # This would propagate into `nix.overrideAttrs f`, but then discard + # `f` when `.overrideAllMesonComponents` is used. + # Both "methods" should be views on the same fixpoint overriding mechanism + # for that to work. For now, we intentionally don't support the broken + # two-fixpoint solution. + /** + Apply an extension function (i.e. overlay-shaped) to all component derivations, and return the nix package. + */ + overrideAllMesonComponents = f: (scope.overrideAllMesonComponents f).nix-everything; + + /** + Append patches to be applied to the whole Nix source. + This affects all components. + + Changes to the packaging expressions will be ignored. 
+ */ + appendPatches = ps: (scope.appendPatches ps).nix-everything; + + /** + Provide an alternate source. This allows the expressions to be vendored without copying the sources, + but it does make the build non-granular; all components will use a complete source. + + Packaging expressions will be ignored. + */ + overrideSource = src: (scope.overrideSource src).nix-everything; + + }; } diff --git a/packaging/dependencies.nix b/packaging/dependencies.nix index afbc31fc6..535b3ff37 100644 --- a/packaging/dependencies.nix +++ b/packaging/dependencies.nix @@ -17,8 +17,6 @@ in let inherit (pkgs) lib; - root = ../.; - stdenv = if prevStdenv.isDarwin && prevStdenv.isx86_64 then darwinStdenv else prevStdenv; # Fix the following error with the default x86_64-darwin SDK: @@ -30,113 +28,6 @@ let # all the way back to 10.6. darwinStdenv = pkgs.overrideSDK prevStdenv { darwinMinVersion = "10.13"; }; - # Nixpkgs implements this by returning a subpath into the fetched Nix sources. - resolvePath = p: p; - - # Indirection for Nixpkgs to override when package.nix files are vendored - filesetToSource = lib.fileset.toSource; - - /** - Given a set of layers, create a mkDerivation-like function - */ - mkPackageBuilder = - exts: userFn: stdenv.mkDerivation (lib.extends (lib.composeManyExtensions exts) userFn); - - localSourceLayer = - finalAttrs: prevAttrs: - let - workDirPath = - # Ideally we'd pick finalAttrs.workDir, but for now `mkDerivation` has - # the requirement that everything except passthru and meta must be - # serialized by mkDerivation, which doesn't work for this. - prevAttrs.workDir; - - workDirSubpath = lib.path.removePrefix root workDirPath; - sources = - assert prevAttrs.fileset._type == "fileset"; - prevAttrs.fileset; - src = lib.fileset.toSource { - fileset = sources; - inherit root; - }; - - in - { - sourceRoot = "${src.name}/" + workDirSubpath; - inherit src; - - # Clear what `derivation` can't/shouldn't serialize; see prevAttrs.workDir. - fileset = null; - workDir = null; - }; - - mesonLayer = finalAttrs: prevAttrs: { - # NOTE: - # As of https://github.com/NixOS/nixpkgs/blob/8baf8241cea0c7b30e0b8ae73474cb3de83c1a30/pkgs/by-name/me/meson/setup-hook.sh#L26, - # `mesonBuildType` defaults to `plain` if not specified. We want our Nix-built binaries to be optimized by default. - # More on build types here: https://mesonbuild.com/Builtin-options.html#details-for-buildtype. - mesonBuildType = "release"; - # NOTE: - # Users who are debugging Nix builds are expected to set the environment variable `mesonBuildType`, per the - # guidance in https://github.com/NixOS/nix/blob/8a3fc27f1b63a08ac983ee46435a56cf49ebaf4a/doc/manual/source/development/debugging.md?plain=1#L10. - # For this reason, we don't want to refer to `finalAttrs.mesonBuildType` here, but rather use the environment variable. - preConfigure = - prevAttrs.preConfigure or "" - + - lib.optionalString - ( - !stdenv.hostPlatform.isWindows - # build failure - && !stdenv.hostPlatform.isStatic - # LTO breaks exception handling on x86-64-darwin. 
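(The passthru helpers added to `nix-everything` above — `appendPatches`, `overrideAllMesonComponents`, `overrideSource` — are easiest to see from the consumer side. A hedged sketch of downstream usage follows; the patch file, build type, and source path are illustrative assumptions, not part of this change.)

```nix
# Hypothetical downstream usage of the new passthru helpers on the `nix` package
# (e.g. self.packages.${system}.nix); the names below are assumptions.
{ nix }:
{
  # Switch to the fetched whole-tree source and apply an extra patch to every component.
  patched = nix.appendPatches [ ./my-fix.patch ];

  # Apply an overlay-shaped extension to every Meson component derivation.
  debuggable = nix.overrideAllMesonComponents (
    finalAttrs: prevAttrs: {
      mesonBuildType = "debugoptimized";
    }
  );

  # Build from an alternate source tree; its packaging expressions are ignored.
  vendored = nix.overrideSource ./nix-src;
}
```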
- && stdenv.system != "x86_64-darwin" - ) - '' - case "$mesonBuildType" in - release|minsize) appendToVar mesonFlags "-Db_lto=true" ;; - *) appendToVar mesonFlags "-Db_lto=false" ;; - esac - ''; - nativeBuildInputs = [ - pkgs.buildPackages.meson - pkgs.buildPackages.ninja - ] ++ prevAttrs.nativeBuildInputs or [ ]; - mesonCheckFlags = prevAttrs.mesonCheckFlags or [ ] ++ [ - "--print-errorlogs" - ]; - }; - - mesonBuildLayer = finalAttrs: prevAttrs: { - nativeBuildInputs = prevAttrs.nativeBuildInputs or [ ] ++ [ - pkgs.buildPackages.pkg-config - ]; - separateDebugInfo = !stdenv.hostPlatform.isStatic; - hardeningDisable = lib.optional stdenv.hostPlatform.isStatic "pie"; - env = - prevAttrs.env or { } - // lib.optionalAttrs ( - stdenv.isLinux - && !(stdenv.hostPlatform.isStatic && stdenv.system == "aarch64-linux") - && !(stdenv.hostPlatform.useLLVM or false) - ) { LDFLAGS = "-fuse-ld=gold"; }; - }; - - mesonLibraryLayer = finalAttrs: prevAttrs: { - outputs = prevAttrs.outputs or [ "out" ] ++ [ "dev" ]; - }; - - # Work around weird `--as-needed` linker behavior with BSD, see - # https://github.com/mesonbuild/meson/issues/3593 - bsdNoLinkAsNeeded = - finalAttrs: prevAttrs: - lib.optionalAttrs stdenv.hostPlatform.isBSD { - mesonFlags = [ (lib.mesonBool "b_asneeded" false) ] ++ prevAttrs.mesonFlags or [ ]; - }; - - miscGoodPractice = finalAttrs: prevAttrs: { - strictDeps = prevAttrs.strictDeps or true; - enableParallelBuilding = true; - }; in scope: { inherit stdenv; @@ -174,56 +65,39 @@ scope: { installPhase = lib.replaceStrings [ "--without-python" ] [ "" ] old.installPhase; }); - libgit2 = pkgs.libgit2.overrideAttrs (attrs: { - cmakeFlags = attrs.cmakeFlags or [ ] ++ [ "-DUSE_SSH=exec" ]; - nativeBuildInputs = - attrs.nativeBuildInputs or [ ] - # gitMinimal does not build on Windows. See packbuilder patch. - ++ lib.optionals (!stdenv.hostPlatform.isWindows) [ - # Needed for `git apply`; see `prePatch` - pkgs.buildPackages.gitMinimal - ]; - # Only `git apply` can handle git binary patches - prePatch = - attrs.prePatch or "" - + lib.optionalString (!stdenv.hostPlatform.isWindows) '' - patch() { - git apply - } - ''; - patches = - attrs.patches or [ ] - ++ [ - ./patches/libgit2-mempack-thin-packfile.patch - ] - # gitMinimal does not build on Windows, but fortunately this patch only - # impacts interruptibility - ++ lib.optionals (!stdenv.hostPlatform.isWindows) [ - # binary patch; see `prePatch` - ./patches/libgit2-packbuilder-callback-interruptible.patch - ]; - }); - - inherit resolvePath filesetToSource; - - mkMesonDerivation = mkPackageBuilder [ - miscGoodPractice - localSourceLayer - mesonLayer - ]; - mkMesonExecutable = mkPackageBuilder [ - miscGoodPractice - bsdNoLinkAsNeeded - localSourceLayer - mesonLayer - mesonBuildLayer - ]; - mkMesonLibrary = mkPackageBuilder [ - miscGoodPractice - bsdNoLinkAsNeeded - localSourceLayer - mesonLayer - mesonBuildLayer - mesonLibraryLayer - ]; + libgit2 = pkgs.libgit2.overrideAttrs ( + attrs: + { + cmakeFlags = attrs.cmakeFlags or [ ] ++ [ "-DUSE_SSH=exec" ]; + } + # libgit2: Nixpkgs 24.11 has < 1.9.0, which needs our patches + // lib.optionalAttrs (!lib.versionAtLeast pkgs.libgit2.version "1.9.0") { + nativeBuildInputs = + attrs.nativeBuildInputs or [ ] + # gitMinimal does not build on Windows. See packbuilder patch. 
+ ++ lib.optionals (!stdenv.hostPlatform.isWindows) [ + # Needed for `git apply`; see `prePatch` + pkgs.buildPackages.gitMinimal + ]; + # Only `git apply` can handle git binary patches + prePatch = + attrs.prePatch or "" + + lib.optionalString (!stdenv.hostPlatform.isWindows) '' + patch() { + git apply + } + ''; + patches = + attrs.patches or [ ] + ++ [ + ./patches/libgit2-mempack-thin-packfile.patch + ] + # gitMinimal does not build on Windows, but fortunately this patch only + # impacts interruptibility + ++ lib.optionals (!stdenv.hostPlatform.isWindows) [ + # binary patch; see `prePatch` + ./patches/libgit2-packbuilder-callback-interruptible.patch + ]; + } + ); } diff --git a/packaging/dev-shell.nix b/packaging/dev-shell.nix index 8e1bb8936..1b6c37f35 100644 --- a/packaging/dev-shell.nix +++ b/packaging/dev-shell.nix @@ -1,6 +1,5 @@ { lib, - inputs, devFlake, }: @@ -117,7 +116,7 @@ pkgs.nixComponents.nix-util.overrideAttrs ( pkgs.buildPackages.changelog-d modular.pre-commit.settings.package (pkgs.writeScriptBin "pre-commit-hooks-install" modular.pre-commit.settings.installationScript) - inputs.nixfmt.packages.${pkgs.hostPlatform.system}.default + pkgs.buildPackages.nixfmt-rfc-style ] # TODO: Remove the darwin check once # https://github.com/NixOS/nixpkgs/pull/291814 is available diff --git a/src/build-remote/build-remote.cc b/src/build-remote/build-remote.cc index 82ad7d862..88b704288 100644 --- a/src/build-remote/build-remote.cc +++ b/src/build-remote/build-remote.cc @@ -51,7 +51,7 @@ static bool allSupportedLocally(Store & store, const std::set& requ static int main_build_remote(int argc, char * * argv) { { - logger = makeJSONLogger(*logger); + logger = makeJSONLogger(getStandardError()); /* Ensure we don't get any SSH passphrase or host key popups. 
*/ unsetenv("DISPLAY"); diff --git a/src/libcmd/common-eval-args.cc b/src/libcmd/common-eval-args.cc index adf816494..57e1774be 100644 --- a/src/libcmd/common-eval-args.cc +++ b/src/libcmd/common-eval-args.cc @@ -37,7 +37,7 @@ EvalSettings evalSettings { auto [accessor, lockedRef] = flakeRef.resolve(state.store).lazyFetch(state.store); auto storePath = nix::fetchToStore(*state.store, SourcePath(accessor), FetchMode::Copy, lockedRef.input.getName()); state.allowPath(storePath); - return state.rootPath(state.store->toRealPath(storePath)); + return state.storePath(storePath); }, }, }, @@ -179,7 +179,7 @@ SourcePath lookupFileArg(EvalState & state, std::string_view s, const Path * bas state.fetchSettings, EvalSettings::resolvePseudoUrl(s)); auto storePath = fetchToStore(*state.store, SourcePath(accessor), FetchMode::Copy); - return state.rootPath(CanonPath(state.store->toRealPath(storePath))); + return state.storePath(storePath); } else if (hasPrefix(s, "flake:")) { @@ -188,7 +188,7 @@ SourcePath lookupFileArg(EvalState & state, std::string_view s, const Path * bas auto [accessor, lockedRef] = flakeRef.resolve(state.store).lazyFetch(state.store); auto storePath = nix::fetchToStore(*state.store, SourcePath(accessor), FetchMode::Copy, lockedRef.input.getName()); state.allowPath(storePath); - return state.rootPath(CanonPath(state.store->toRealPath(storePath))); + return state.storePath(storePath); } else if (s.size() > 2 && s.at(0) == '<' && s.at(s.size() - 1) == '>') { diff --git a/src/libcmd/package.nix b/src/libcmd/package.nix index d155d9f1e..d459d1c20 100644 --- a/src/libcmd/package.nix +++ b/src/libcmd/package.nix @@ -64,14 +64,6 @@ mkMesonLibrary (finalAttrs: { nlohmann_json ]; - preConfigure = - # "Inline" .version so it's not a symlink, and includes the suffix. - # Do the meson utils, without modification. - '' - chmod u+w ./.version - echo ${version} > ../../.version - ''; - mesonFlags = [ (lib.mesonEnable "markdown" enableMarkdown) (lib.mesonOption "readline-flavor" readlineFlavor) diff --git a/src/libcmd/repl.cc b/src/libcmd/repl.cc index f292f06bb..3043be7a3 100644 --- a/src/libcmd/repl.cc +++ b/src/libcmd/repl.cc @@ -101,6 +101,9 @@ struct NixRepl Value & v, unsigned int maxDepth = std::numeric_limits::max()) { + // Hide the progress bar during printing because it might interfere + logger->pause(); + Finally resumeLoggerDefer([]() { logger->resume(); }); ::nix::printValue(*state, str, v, PrintOptions { .ansiColors = true, .force = true, diff --git a/src/libexpr-c/package.nix b/src/libexpr-c/package.nix index ad1ea371c..694fbc1fe 100644 --- a/src/libexpr-c/package.nix +++ b/src/libexpr-c/package.nix @@ -36,14 +36,6 @@ mkMesonLibrary (finalAttrs: { nix-expr ]; - preConfigure = - # "Inline" .version so it's not a symlink, and includes the suffix. - # Do the meson utils, without modification. - '' - chmod u+w ./.version - echo ${version} > ../../.version - ''; - mesonFlags = [ ]; diff --git a/src/libexpr-test-support/package.nix b/src/libexpr-test-support/package.nix index 5628d606a..44b0ff386 100644 --- a/src/libexpr-test-support/package.nix +++ b/src/libexpr-test-support/package.nix @@ -40,14 +40,6 @@ mkMesonLibrary (finalAttrs: { rapidcheck ]; - preConfigure = - # "Inline" .version so it's not a symlink, and includes the suffix. - # Do the meson utils, without modification. 
- '' - chmod u+w ./.version - echo ${version} > ../../.version - ''; - mesonFlags = [ ]; diff --git a/src/libexpr-tests/package.nix b/src/libexpr-tests/package.nix index bb5acb7c8..51d52e935 100644 --- a/src/libexpr-tests/package.nix +++ b/src/libexpr-tests/package.nix @@ -46,14 +46,6 @@ mkMesonExecutable (finalAttrs: { gtest ]; - preConfigure = - # "Inline" .version so it's not a symlink, and includes the suffix. - # Do the meson utils, without modification. - '' - chmod u+w ./.version - echo ${version} > ../../.version - ''; - mesonFlags = [ ]; diff --git a/src/libexpr-tests/primops.cc b/src/libexpr-tests/primops.cc index 5b5898237..2bf726477 100644 --- a/src/libexpr-tests/primops.cc +++ b/src/libexpr-tests/primops.cc @@ -28,20 +28,15 @@ namespace nix { }; class CaptureLogging { - Logger * oldLogger; - std::unique_ptr tempLogger; + std::unique_ptr oldLogger; public: - CaptureLogging() : tempLogger(std::make_unique()) { - oldLogger = logger; - logger = tempLogger.get(); + CaptureLogging() { + oldLogger = std::move(logger); + logger = std::make_unique(); } ~CaptureLogging() { - logger = oldLogger; - } - - std::string get() const { - return tempLogger->get(); + logger = std::move(oldLogger); } }; @@ -113,7 +108,7 @@ namespace nix { CaptureLogging l; auto v = eval("builtins.trace \"test string 123\" 123"); ASSERT_THAT(v, IsIntEq(123)); - auto text = l.get(); + auto text = (dynamic_cast(logger.get()))->get(); ASSERT_NE(text.find("test string 123"), std::string::npos); } diff --git a/src/libexpr/eval-settings.cc b/src/libexpr/eval-settings.cc index 4cbcb39b9..ade0abf9a 100644 --- a/src/libexpr/eval-settings.cc +++ b/src/libexpr/eval-settings.cc @@ -57,7 +57,7 @@ Strings EvalSettings::getDefaultNixPath() { Strings res; auto add = [&](const Path & p, const std::string & s = std::string()) { - if (pathAccessible(p)) { + if (std::filesystem::exists(p)) { if (s.empty()) { res.push_back(p); } else { diff --git a/src/libexpr/eval-settings.hh b/src/libexpr/eval-settings.hh index a8fcce539..fe947aefd 100644 --- a/src/libexpr/eval-settings.hh +++ b/src/libexpr/eval-settings.hh @@ -2,7 +2,6 @@ ///@file #include "config.hh" -#include "ref.hh" #include "source-path.hh" namespace nix { diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc index dee764429..6a45f24b8 100644 --- a/src/libexpr/eval.cc +++ b/src/libexpr/eval.cc @@ -246,15 +246,42 @@ EvalState::EvalState( , repair(NoRepair) , emptyBindings(0) , rootFS( - settings.restrictEval || settings.pureEval - ? ref(AllowListSourceAccessor::create(getFSSourceAccessor(), {}, - [&settings](const CanonPath & path) -> RestrictedPathError { - auto modeInformation = settings.pureEval - ? "in pure evaluation mode (use '--impure' to override)" - : "in restricted mode"; - throw RestrictedPathError("access to absolute path '%1%' is forbidden %2%", path, modeInformation); - })) - : getFSSourceAccessor()) + ({ + /* In pure eval mode, we provide a filesystem that only + contains the Nix store. + + If we have a chroot store and pure eval is not enabled, + use a union accessor to make the chroot store available + at its logical location while still having the + underlying directory available. This is necessary for + instance if we're evaluating a file from the physical + /nix/store while using a chroot store. 
*/ + auto accessor = getFSSourceAccessor(); + + auto realStoreDir = dirOf(store->toRealPath(StorePath::dummy)); + if (settings.pureEval || store->storeDir != realStoreDir) { + auto storeFS = makeMountedSourceAccessor( + { + {CanonPath::root, makeEmptySourceAccessor()}, + {CanonPath(store->storeDir), makeFSSourceAccessor(realStoreDir)} + }); + accessor = settings.pureEval + ? storeFS + : makeUnionSourceAccessor({accessor, storeFS}); + } + + /* Apply access control if needed. */ + if (settings.restrictEval || settings.pureEval) + accessor = AllowListSourceAccessor::create(accessor, {}, + [&settings](const CanonPath & path) -> RestrictedPathError { + auto modeInformation = settings.pureEval + ? "in pure evaluation mode (use '--impure' to override)" + : "in restricted mode"; + throw RestrictedPathError("access to absolute path '%1%' is forbidden %2%", path, modeInformation); + }); + + accessor; + })) , corepkgsFS(make_ref()) , internalFS(make_ref()) , derivationInternal{corepkgsFS->addFile( @@ -344,7 +371,7 @@ void EvalState::allowPath(const Path & path) void EvalState::allowPath(const StorePath & storePath) { if (auto rootFS2 = rootFS.dynamic_pointer_cast()) - rootFS2->allowPrefix(CanonPath(store->toRealPath(storePath))); + rootFS2->allowPrefix(CanonPath(store->printStorePath(storePath))); } void EvalState::allowClosure(const StorePath & storePath) @@ -422,16 +449,6 @@ void EvalState::checkURI(const std::string & uri) } -Path EvalState::toRealPath(const Path & path, const NixStringContext & context) -{ - // FIXME: check whether 'path' is in 'context'. - return - !context.empty() && store->isInStore(path) - ? store->toRealPath(path) - : path; -} - - Value * EvalState::addConstant(const std::string & name, Value & v, Constant info) { Value * v2 = allocValue(); @@ -2051,7 +2068,7 @@ void ExprConcatStrings::eval(EvalState & state, Env & env, Value & v) else if (firstType == nPath) { if (!context.empty()) state.error("a string that refers to a store path cannot be appended to a path").atPos(pos).withFrame(env, *this).debugThrow(); - v.mkPath(state.rootPath(CanonPath(canonPath(str())))); + v.mkPath(state.rootPath(CanonPath(str()))); } else v.mkStringMove(c_str(), context); } @@ -2432,7 +2449,7 @@ SourcePath EvalState::coerceToPath(const PosIdx pos, Value & v, NixStringContext auto path = coerceToString(pos, v, context, errorCtx, false, false, true).toOwned(); if (path == "" || path[0] != '/') error("string '%1%' doesn't represent an absolute path", path).withTrace(pos, errorCtx).debugThrow(); - return rootPath(CanonPath(path)); + return rootPath(path); } @@ -3070,8 +3087,11 @@ std::optional EvalState::resolveLookupPathPath(const LookupPath::Pat auto i = lookupPathResolved.find(value); if (i != lookupPathResolved.end()) return i->second; - auto finish = [&](SourcePath res) { - debug("resolved search path element '%s' to '%s'", value, res); + auto finish = [&](std::optional res) { + if (res) + debug("resolved search path element '%s' to '%s'", value, *res); + else + debug("failed to resolve search path element '%s'", value); lookupPathResolved.emplace(value, res); return res; }; @@ -3083,7 +3103,7 @@ std::optional EvalState::resolveLookupPathPath(const LookupPath::Pat fetchSettings, EvalSettings::resolvePseudoUrl(value)); auto storePath = fetchToStore(*store, SourcePath(accessor), FetchMode::Copy); - return finish(rootPath(store->toRealPath(storePath))); + return finish(this->storePath(storePath)); } catch (Error & e) { logWarning({ .msg = HintFmt("Nix search path entry '%1%' cannot be downloaded, 
ignoring", value) @@ -3123,8 +3143,7 @@ std::optional EvalState::resolveLookupPathPath(const LookupPath::Pat } } - debug("failed to resolve search path element '%s'", value); - return std::nullopt; + return finish(std::nullopt); } diff --git a/src/libexpr/eval.hh b/src/libexpr/eval.hh index 767578343..b11e40c30 100644 --- a/src/libexpr/eval.hh +++ b/src/libexpr/eval.hh @@ -389,6 +389,15 @@ public: */ SourcePath rootPath(PathView path); + /** + * Return a `SourcePath` that refers to `path` in the store. + * + * For now, this has to also be within the root filesystem for + * backwards compat, but for Windows and maybe also pure eval, we'll + * probably want to do something different. + */ + SourcePath storePath(const StorePath & path); + /** * Allow access to a path. */ @@ -412,17 +421,6 @@ public: void checkURI(const std::string & uri); - /** - * When using a diverted store and 'path' is in the Nix store, map - * 'path' to the diverted location (e.g. /nix/store/foo is mapped - * to /home/alice/my-nix/nix/store/foo). However, this is only - * done if the context is not empty, since otherwise we're - * probably trying to read from the actual /nix/store. This is - * intended to distinguish between import-from-derivation and - * sources stored in the actual /nix/store. - */ - Path toRealPath(const Path & path, const NixStringContext & context); - /** * Parse a Nix expression from the specified file. */ diff --git a/src/libexpr/package.nix b/src/libexpr/package.nix index afd01c384..533dae9f2 100644 --- a/src/libexpr/package.nix +++ b/src/libexpr/package.nix @@ -77,14 +77,6 @@ mkMesonLibrary (finalAttrs: { nlohmann_json ] ++ lib.optional enableGC boehmgc; - preConfigure = - # "Inline" .version so it's not a symlink, and includes the suffix. - # Do the meson utils, without modification. - '' - chmod u+w ./.version - echo ${version} > ../../.version - ''; - mesonFlags = [ (lib.mesonEnable "gc" enableGC) ]; diff --git a/src/libexpr/paths.cc b/src/libexpr/paths.cc index 50d0d9895..3d602ae2d 100644 --- a/src/libexpr/paths.cc +++ b/src/libexpr/paths.cc @@ -1,3 +1,4 @@ +#include "store-api.hh" #include "eval.hh" namespace nix { @@ -12,4 +13,9 @@ SourcePath EvalState::rootPath(PathView path) return {rootFS, CanonPath(absPath(path))}; } +SourcePath EvalState::storePath(const StorePath & path) +{ + return {rootFS, CanonPath{store->printStorePath(path)}}; +} + } diff --git a/src/libexpr/primops.cc b/src/libexpr/primops.cc index 51d2991e7..7c9ce7104 100644 --- a/src/libexpr/primops.cc +++ b/src/libexpr/primops.cc @@ -145,8 +145,7 @@ static SourcePath realisePath(EvalState & state, const PosIdx pos, Value & v, st try { if (!context.empty() && path.accessor == state.rootFS) { auto rewrites = state.realiseContext(context); - auto realPath = state.toRealPath(rewriteStrings(path.path.abs(), rewrites), context); - path = {path.accessor, CanonPath(realPath)}; + path = {path.accessor, CanonPath(rewriteStrings(path.path.abs(), rewrites))}; } return resolveSymlinks ? path.resolveSymlinks(*resolveSymlinks) : path; } catch (Error & e) { @@ -2479,21 +2478,11 @@ static void addPath( const NixStringContext & context) { try { - StorePathSet refs; - if (path.accessor == state.rootFS && state.store->isInStore(path.path.abs())) { // FIXME: handle CA derivation outputs (where path needs to // be rewritten to the actual output). 
auto rewrites = state.realiseContext(context); - path = {state.rootFS, CanonPath(state.toRealPath(rewriteStrings(path.path.abs(), rewrites), context))}; - - try { - auto [storePath, subPath] = state.store->toStorePath(path.path.abs()); - // FIXME: we should scanForReferences on the path before adding it - refs = state.store->queryPathInfo(storePath)->references; - path = {state.rootFS, CanonPath(state.store->toRealPath(storePath) + subPath)}; - } catch (Error &) { // FIXME: should be InvalidPathError - } + path = {path.accessor, CanonPath(rewriteStrings(path.path.abs(), rewrites))}; } std::unique_ptr filter; diff --git a/src/libexpr/primops/fetchTree.cc b/src/libexpr/primops/fetchTree.cc index c4b8b2999..bd013eab2 100644 --- a/src/libexpr/primops/fetchTree.cc +++ b/src/libexpr/primops/fetchTree.cc @@ -367,6 +367,12 @@ static RegisterPrimOp primop_fetchTree({ Default: `false` + - `lfs` (Bool, optional) + + Fetch any [Git LFS](https://git-lfs.com/) files. + + Default: `false` + - `allRefs` (Bool, optional) By default, this has no effect. This becomes relevant only once `shallow` cloning is disabled. @@ -691,6 +697,13 @@ static RegisterPrimOp primop_fetchGit({ Make a shallow clone when fetching the Git tree. When this is enabled, the options `ref` and `allRefs` have no effect anymore. + + - `lfs` (default: `false`) + + A boolean that when `true` specifies that [Git LFS] files should be fetched. + + [Git LFS]: https://git-lfs.com/ + - `allRefs` Whether to fetch all references (eg. branches and tags) of the repository. diff --git a/src/libfetchers-tests/git-utils.cc b/src/libfetchers-tests/git-utils.cc index 0bf3076dc..51622a955 100644 --- a/src/libfetchers-tests/git-utils.cc +++ b/src/libfetchers-tests/git-utils.cc @@ -7,13 +7,18 @@ #include #include "fs-sink.hh" #include "serialise.hh" +#include "git-lfs-fetch.hh" namespace nix { +namespace fs { +using namespace std::filesystem; +} + class GitUtilsTest : public ::testing::Test { // We use a single repository for all tests. - Path tmpDir; + fs::path tmpDir; std::unique_ptr delTmpDir; public: @@ -41,6 +46,11 @@ public: { return GitRepo::openRepo(tmpDir, true, false); } + + std::string getRepoName() const + { + return tmpDir.filename(); + } }; void writeString(CreateRegularFileSink & fileSink, std::string contents, bool executable) @@ -78,7 +88,7 @@ TEST_F(GitUtilsTest, sink_basic) // sink->createHardlink("foo-1.1/links/foo-2", CanonPath("foo-1.1/hello")); auto result = repo->dereferenceSingletonDirectory(sink->flush()); - auto accessor = repo->getAccessor(result, false); + auto accessor = repo->getAccessor(result, false, getRepoName()); auto entries = accessor->readDirectory(CanonPath::root); ASSERT_EQ(entries.size(), 5); ASSERT_EQ(accessor->readFile(CanonPath("hello")), "hello world"); diff --git a/src/libfetchers-tests/meson.build b/src/libfetchers-tests/meson.build index 243bbab80..b60ff5675 100644 --- a/src/libfetchers-tests/meson.build +++ b/src/libfetchers-tests/meson.build @@ -31,6 +31,9 @@ deps_private += rapidcheck gtest = dependency('gtest', main : true) deps_private += gtest +libgit2 = dependency('libgit2') +deps_private += libgit2 + add_project_arguments( # TODO(Qyriad): Yes this is how the autoconf+Make system did it. # It would be nice for our headers to be idempotent instead. 
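(The new `lfs` option documented for `builtins.fetchTree` above can be exercised directly; a minimal sketch, with a placeholder URL:)

```nix
# Sketch of the new `lfs` option for builtins.fetchTree documented above;
# the URL is a placeholder.
builtins.fetchTree {
  type = "git";
  url = "https://example.org/org/repo-with-lfs.git";
  lfs = true;
}
```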
@@ -43,8 +46,9 @@ add_project_arguments( subdir('nix-meson-build-support/common') sources = files( - 'public-key.cc', 'access-tokens.cc', + 'git-utils.cc', + 'public-key.cc', ) include_dirs = [include_directories('.')] diff --git a/src/libfetchers-tests/package.nix b/src/libfetchers-tests/package.nix index f2680e9b3..6e3581183 100644 --- a/src/libfetchers-tests/package.nix +++ b/src/libfetchers-tests/package.nix @@ -7,6 +7,7 @@ nix-fetchers, nix-store-test-support, + libgit2, rapidcheck, gtest, runCommand, @@ -42,16 +43,9 @@ mkMesonExecutable (finalAttrs: { nix-store-test-support rapidcheck gtest + libgit2 ]; - preConfigure = - # "Inline" .version so it's not a symlink, and includes the suffix. - # Do the meson utils, without modification. - '' - chmod u+w ./.version - echo ${version} > ../../.version - ''; - mesonFlags = [ ]; diff --git a/src/libfetchers/git-lfs-fetch.cc b/src/libfetchers/git-lfs-fetch.cc new file mode 100644 index 000000000..bd6c01435 --- /dev/null +++ b/src/libfetchers/git-lfs-fetch.cc @@ -0,0 +1,279 @@ +#include "git-lfs-fetch.hh" +#include "git-utils.hh" +#include "filetransfer.hh" +#include "processes.hh" +#include "url.hh" +#include "users.hh" +#include "hash.hh" + +#include +#include +#include +#include + +#include + +namespace nix::lfs { + +// if authHeader is "", downloadToSink assumes no auth is expected +static void downloadToSink( + const std::string & url, + const std::string & authHeader, + // FIXME: passing a StringSink is superfluous, we may as well + // return a string. Or use an abstract Sink for streaming. + StringSink & sink, + std::string sha256Expected, + size_t sizeExpected) +{ + FileTransferRequest request(url); + Headers headers; + if (!authHeader.empty()) + headers.push_back({"Authorization", authHeader}); + request.headers = headers; + getFileTransfer()->download(std::move(request), sink); + + auto sizeActual = sink.s.length(); + if (sizeExpected != sizeActual) + throw Error("size mismatch while fetching %s: expected %d but got %d", url, sizeExpected, sizeActual); + + auto sha256Actual = hashString(HashAlgorithm::SHA256, sink.s).to_string(HashFormat::Base16, false); + if (sha256Actual != sha256Expected) + throw Error( + "hash mismatch while fetching %s: expected sha256:%s but got sha256:%s", url, sha256Expected, sha256Actual); +} + +static std::string getLfsApiToken(const ParsedURL & url) +{ + auto [status, output] = runProgram(RunOptions{ + .program = "ssh", + .args = {*url.authority, "git-lfs-authenticate", url.path, "download"}, + }); + + if (output.empty()) + throw Error( + "git-lfs-authenticate: no output (cmd: ssh %s git-lfs-authenticate %s download)", + url.authority.value_or(""), + url.path); + + auto queryResp = nlohmann::json::parse(output); + if (!queryResp.contains("header")) + throw Error("no header in git-lfs-authenticate response"); + if (!queryResp["header"].contains("Authorization")) + throw Error("no Authorization in git-lfs-authenticate response"); + + return queryResp["header"]["Authorization"].get(); +} + +typedef std::unique_ptr> GitConfig; +typedef std::unique_ptr> GitConfigEntry; + +static std::string getLfsEndpointUrl(git_repository * repo) +{ + GitConfig config; + if (git_repository_config(Setter(config), repo)) { + GitConfigEntry entry; + if (!git_config_get_entry(Setter(entry), config.get(), "lfs.url")) { + auto value = std::string(entry->value); + if (!value.empty()) { + debug("Found explicit lfs.url value: %s", value); + return value; + } + } + } + + git_remote * remote = nullptr; + if (git_remote_lookup(&remote, 
repo, "origin")) + return ""; + + const char * url_c_str = git_remote_url(remote); + if (!url_c_str) + return ""; + + return std::string(url_c_str); +} + +static std::optional parseLfsPointer(std::string_view content, std::string_view filename) +{ + // https://github.com/git-lfs/git-lfs/blob/2ef4108/docs/spec.md + // + // example git-lfs pointer file: + // version https://git-lfs.github.com/spec/v1 + // oid sha256:f5e02aa71e67f41d79023a128ca35bad86cf7b6656967bfe0884b3a3c4325eaf + // size 10000000 + // (ending \n) + + if (!content.starts_with("version ")) { + // Invalid pointer file + return std::nullopt; + } + + if (!content.starts_with("version https://git-lfs.github.com/spec/v1")) { + // In case there's new spec versions in the future, but for now only v1 exists + debug("Invalid version found on potential lfs pointer file, skipping"); + return std::nullopt; + } + + std::string oid; + std::string size; + + for (auto & line : tokenizeString(content, "\n")) { + if (line.starts_with("version ")) { + continue; + } + if (line.starts_with("oid sha256:")) { + oid = line.substr(11); // skip "oid sha256:" + continue; + } + if (line.starts_with("size ")) { + size = line.substr(5); // skip "size " + continue; + } + + debug("Custom extension '%s' found, ignoring", line); + } + + if (oid.length() != 64 || !std::all_of(oid.begin(), oid.end(), ::isxdigit)) { + debug("Invalid sha256 %s, skipping", oid); + return std::nullopt; + } + + if (size.length() == 0 || !std::all_of(size.begin(), size.end(), ::isdigit)) { + debug("Invalid size %s, skipping", size); + return std::nullopt; + } + + return std::make_optional(Pointer{oid, std::stoul(size)}); +} + +Fetch::Fetch(git_repository * repo, git_oid rev) +{ + this->repo = repo; + this->rev = rev; + + const auto remoteUrl = lfs::getLfsEndpointUrl(repo); + + this->url = nix::parseURL(nix::fixGitURL(remoteUrl)).canonicalise(); +} + +bool Fetch::shouldFetch(const CanonPath & path) const +{ + const char * attr = nullptr; + git_attr_options opts = GIT_ATTR_OPTIONS_INIT; + opts.attr_commit_id = this->rev; + opts.flags = GIT_ATTR_CHECK_INCLUDE_COMMIT | GIT_ATTR_CHECK_NO_SYSTEM; + if (git_attr_get_ext(&attr, (git_repository *) (this->repo), &opts, path.rel_c_str(), "filter")) + throw Error("cannot get git-lfs attribute: %s", git_error_last()->message); + debug("Git filter for '%s' is '%s'", path, attr ? attr : "null"); + return attr != nullptr && !std::string(attr).compare("lfs"); +} + +static nlohmann::json pointerToPayload(const std::vector & items) +{ + nlohmann::json jArray = nlohmann::json::array(); + for (const auto & pointer : items) + jArray.push_back({{"oid", pointer.oid}, {"size", pointer.size}}); + return jArray; +} + +std::vector Fetch::fetchUrls(const std::vector & pointers) const +{ + ParsedURL httpUrl(url); + httpUrl.scheme = url.scheme == "ssh" ? 
"https" : url.scheme; + FileTransferRequest request(httpUrl.to_string() + "/info/lfs/objects/batch"); + request.post = true; + Headers headers; + if (this->url.scheme == "ssh") + headers.push_back({"Authorization", lfs::getLfsApiToken(this->url)}); + headers.push_back({"Content-Type", "application/vnd.git-lfs+json"}); + headers.push_back({"Accept", "application/vnd.git-lfs+json"}); + request.headers = headers; + nlohmann::json oidList = pointerToPayload(pointers); + nlohmann::json data = {{"operation", "download"}}; + data["objects"] = oidList; + request.data = data.dump(); + + FileTransferResult result = getFileTransfer()->upload(request); + auto responseString = result.data; + + std::vector objects; + // example resp here: + // {"objects":[{"oid":"f5e02aa71e67f41d79023a128ca35bad86cf7b6656967bfe0884b3a3c4325eaf","size":10000000,"actions":{"download":{"href":"https://gitlab.com/b-camacho/test-lfs.git/gitlab-lfs/objects/f5e02aa71e67f41d79023a128ca35bad86cf7b6656967bfe0884b3a3c4325eaf","header":{"Authorization":"Basic + // Yi1jYW1hY2hvOmV5SjBlWEFpT2lKS1YxUWlMQ0poYkdjaU9pSklVekkxTmlKOS5leUprWVhSaElqcDdJbUZqZEc5eUlqb2lZaTFqWVcxaFkyaHZJbjBzSW1wMGFTSTZJbUptTURZNFpXVTFMVEprWmpVdE5HWm1ZUzFpWWpRMExUSXpNVEV3WVRReU1qWmtaaUlzSW1saGRDSTZNVGN4TkRZeE16ZzBOU3dpYm1KbUlqb3hOekUwTmpFek9EUXdMQ0psZUhBaU9qRTNNVFEyTWpFd05EVjkuZk9yMDNkYjBWSTFXQzFZaTBKRmJUNnJTTHJPZlBwVW9lYllkT0NQZlJ4QQ=="}}},"authenticated":true}]} + + try { + auto resp = nlohmann::json::parse(responseString); + if (resp.contains("objects")) + objects.insert(objects.end(), resp["objects"].begin(), resp["objects"].end()); + else + throw Error("response does not contain 'objects'"); + + return objects; + } catch (const nlohmann::json::parse_error & e) { + printMsg(lvlTalkative, "Full response: '%1%'", responseString); + throw Error("response did not parse as json: %s", e.what()); + } +} + +void Fetch::fetch( + const std::string & content, + const CanonPath & pointerFilePath, + StringSink & sink, + std::function sizeCallback) const +{ + debug("trying to fetch '%s' using git-lfs", pointerFilePath); + + if (content.length() >= 1024) { + warn("encountered file '%s' that should have been a git-lfs pointer, but is too large", pointerFilePath); + sizeCallback(content.length()); + sink(content); + return; + } + + const auto pointer = parseLfsPointer(content, pointerFilePath.rel()); + if (pointer == std::nullopt) { + warn("encountered file '%s' that should have been a git-lfs pointer, but is invalid", pointerFilePath); + sizeCallback(content.length()); + sink(content); + return; + } + + Path cacheDir = getCacheDir() + "/git-lfs"; + std::string key = hashString(HashAlgorithm::SHA256, pointerFilePath.rel()).to_string(HashFormat::Base16, false) + + "/" + pointer->oid; + Path cachePath = cacheDir + "/" + key; + if (pathExists(cachePath)) { + debug("using cache entry %s -> %s", key, cachePath); + sink(readFile(cachePath)); + return; + } + debug("did not find cache entry for %s", key); + + std::vector pointers; + pointers.push_back(pointer.value()); + const auto objUrls = fetchUrls(pointers); + + const auto obj = objUrls[0]; + try { + std::string sha256 = obj.at("oid"); // oid is also the sha256 + std::string ourl = obj.at("actions").at("download").at("href"); + std::string authHeader = ""; + if (obj.at("actions").at("download").contains("header") + && obj.at("actions").at("download").at("header").contains("Authorization")) { + authHeader = obj["actions"]["download"]["header"]["Authorization"]; + } + const uint64_t size = obj.at("size"); + sizeCallback(size); + 
downloadToSink(ourl, authHeader, sink, sha256, size); + + debug("creating cache entry %s -> %s", key, cachePath); + if (!pathExists(dirOf(cachePath))) + createDirs(dirOf(cachePath)); + writeFile(cachePath, sink.s); + + debug("%s fetched with git-lfs", pointerFilePath); + } catch (const nlohmann::json::out_of_range & e) { + throw Error("bad json from /info/lfs/objects/batch: %s %s", obj, e.what()); + } +} + +} // namespace nix::lfs diff --git a/src/libfetchers/git-lfs-fetch.hh b/src/libfetchers/git-lfs-fetch.hh new file mode 100644 index 000000000..36df91962 --- /dev/null +++ b/src/libfetchers/git-lfs-fetch.hh @@ -0,0 +1,43 @@ +#include "canon-path.hh" +#include "serialise.hh" +#include "url.hh" + +#include + +#include + +namespace nix::lfs { + +/** + * git-lfs pointer + * @see https://github.com/git-lfs/git-lfs/blob/2ef4108/docs/spec.md + */ +struct Pointer +{ + std::string oid; // git-lfs managed object id. you give this to the lfs server + // for downloads + size_t size; // in bytes +}; + +struct Fetch +{ + // Reference to the repository + const git_repository * repo; + + // Git commit being fetched + git_oid rev; + + // derived from git remote url + nix::ParsedURL url; + + Fetch(git_repository * repo, git_oid rev); + bool shouldFetch(const CanonPath & path) const; + void fetch( + const std::string & content, + const CanonPath & pointerFilePath, + StringSink & sink, + std::function sizeCallback) const; + std::vector fetchUrls(const std::vector & pointers) const; +}; + +} // namespace nix::lfs diff --git a/src/libfetchers/git-utils.cc b/src/libfetchers/git-utils.cc index a6b13fb31..a2761a543 100644 --- a/src/libfetchers/git-utils.cc +++ b/src/libfetchers/git-utils.cc @@ -1,4 +1,5 @@ #include "git-utils.hh" +#include "git-lfs-fetch.hh" #include "cache.hh" #include "finally.hh" #include "processes.hh" @@ -60,14 +61,6 @@ namespace nix { struct GitSourceAccessor; -// Some wrapper types that ensure that the git_*_free functions get called. -template -struct Deleter -{ - template - void operator()(T * p) const { del(p); }; -}; - typedef std::unique_ptr> Repository; typedef std::unique_ptr> TreeEntry; typedef std::unique_ptr> Tree; @@ -85,20 +78,6 @@ typedef std::unique_ptr> ObjectDb; typedef std::unique_ptr> PackBuilder; typedef std::unique_ptr> Indexer; -// A helper to ensure that we don't leak objects returned by libgit2. -template -struct Setter -{ - T & t; - typename T::pointer p = nullptr; - - Setter(T & t) : t(t) { } - - ~Setter() { if (p) t = T(p); } - - operator typename T::pointer * () { return &p; } -}; - Hash toHash(const git_oid & oid) { #ifdef GIT_EXPERIMENTAL_SHA256 @@ -506,12 +485,15 @@ struct GitRepoImpl : GitRepo, std::enable_shared_from_this /** * A 'GitSourceAccessor' with no regard for export-ignore or any other transformations. */ - ref getRawAccessor(const Hash & rev); + ref getRawAccessor( + const Hash & rev, + bool smudgeLfs = false); ref getAccessor( const Hash & rev, bool exportIgnore, - std::string displayPrefix) override; + std::string displayPrefix, + bool smudgeLfs = false) override; ref getAccessor(const WorkdirInfo & wd, bool exportIgnore, MakeNotAllowedError e) override; @@ -670,24 +652,40 @@ ref GitRepo::openRepo(const std::filesystem::path & path, bool create, /** * Raw git tree input accessor. 
*/ + struct GitSourceAccessor : SourceAccessor { ref repo; Object root; + std::optional lfsFetch = std::nullopt; - GitSourceAccessor(ref repo_, const Hash & rev) + GitSourceAccessor(ref repo_, const Hash & rev, bool smudgeLfs) : repo(repo_) , root(peelToTreeOrBlob(lookupObject(*repo, hashToOID(rev)).get())) { + if (smudgeLfs) + lfsFetch = std::make_optional(lfs::Fetch(*repo, hashToOID(rev))); } std::string readBlob(const CanonPath & path, bool symlink) { - auto blob = getBlob(path, symlink); + const auto blob = getBlob(path, symlink); - auto data = std::string_view((const char *) git_blob_rawcontent(blob.get()), git_blob_rawsize(blob.get())); + if (lfsFetch) { + if (lfsFetch->shouldFetch(path)) { + StringSink s; + try { + auto contents = std::string((const char *) git_blob_rawcontent(blob.get()), git_blob_rawsize(blob.get())); + lfsFetch->fetch(contents, path, s, [&s](uint64_t size){ s.s.reserve(size); }); + } catch (Error & e) { + e.addTrace({}, "while smudging git-lfs file '%s'", path); + throw; + } + return s.s; + } + } - return std::string(data); + return std::string((const char *) git_blob_rawcontent(blob.get()), git_blob_rawsize(blob.get())); } std::string readFile(const CanonPath & path) override @@ -1191,19 +1189,22 @@ struct GitFileSystemObjectSinkImpl : GitFileSystemObjectSink } }; -ref GitRepoImpl::getRawAccessor(const Hash & rev) +ref GitRepoImpl::getRawAccessor( + const Hash & rev, + bool smudgeLfs) { auto self = ref(shared_from_this()); - return make_ref(self, rev); + return make_ref(self, rev, smudgeLfs); } ref GitRepoImpl::getAccessor( const Hash & rev, bool exportIgnore, - std::string displayPrefix) + std::string displayPrefix, + bool smudgeLfs) { auto self = ref(shared_from_this()); - ref rawGitAccessor = getRawAccessor(rev); + ref rawGitAccessor = getRawAccessor(rev, smudgeLfs); rawGitAccessor->setPathDisplay(std::move(displayPrefix)); if (exportIgnore) return make_ref(self, rawGitAccessor, rev); diff --git a/src/libfetchers/git-utils.hh b/src/libfetchers/git-utils.hh index 9677f5079..c683bd058 100644 --- a/src/libfetchers/git-utils.hh +++ b/src/libfetchers/git-utils.hh @@ -89,7 +89,8 @@ struct GitRepo virtual ref getAccessor( const Hash & rev, bool exportIgnore, - std::string displayPrefix) = 0; + std::string displayPrefix, + bool smudgeLfs = false) = 0; virtual ref getAccessor(const WorkdirInfo & wd, bool exportIgnore, MakeNotAllowedError makeNotAllowedError) = 0; @@ -126,4 +127,26 @@ struct GitRepo ref getTarballCache(); +// A helper to ensure that the `git_*_free` functions get called. +template +struct Deleter +{ + template + void operator()(T * p) const { del(p); }; +}; + +// A helper to ensure that we don't leak objects returned by libgit2. 
+template +struct Setter +{ + T & t; + typename T::pointer p = nullptr; + + Setter(T & t) : t(t) { } + + ~Setter() { if (p) t = T(p); } + + operator typename T::pointer * () { return &p; } +}; + } diff --git a/src/libfetchers/git.cc b/src/libfetchers/git.cc index 0d423a7a3..8d4d9abcc 100644 --- a/src/libfetchers/git.cc +++ b/src/libfetchers/git.cc @@ -9,7 +9,6 @@ #include "pathlocks.hh" #include "processes.hh" #include "git.hh" -#include "mounted-source-accessor.hh" #include "git-utils.hh" #include "logging.hh" #include "finally.hh" @@ -185,7 +184,7 @@ struct GitInputScheme : InputScheme for (auto & [name, value] : url.query) { if (name == "rev" || name == "ref" || name == "keytype" || name == "publicKey" || name == "publicKeys") attrs.emplace(name, value); - else if (name == "shallow" || name == "submodules" || name == "exportIgnore" || name == "allRefs" || name == "verifyCommit") + else if (name == "shallow" || name == "submodules" || name == "lfs" || name == "exportIgnore" || name == "allRefs" || name == "verifyCommit") attrs.emplace(name, Explicit { value == "1" }); else url2.query.emplace(name, value); @@ -210,6 +209,7 @@ struct GitInputScheme : InputScheme "rev", "shallow", "submodules", + "lfs", "exportIgnore", "lastModified", "revCount", @@ -262,6 +262,8 @@ struct GitInputScheme : InputScheme if (auto ref = input.getRef()) url.query.insert_or_assign("ref", *ref); if (getShallowAttr(input)) url.query.insert_or_assign("shallow", "1"); + if (getLfsAttr(input)) + url.query.insert_or_assign("lfs", "1"); if (getSubmodulesAttr(input)) url.query.insert_or_assign("submodules", "1"); if (maybeGetBoolAttr(input.attrs, "exportIgnore").value_or(false)) @@ -411,6 +413,11 @@ struct GitInputScheme : InputScheme return maybeGetBoolAttr(input.attrs, "submodules").value_or(false); } + bool getLfsAttr(const Input & input) const + { + return maybeGetBoolAttr(input.attrs, "lfs").value_or(false); + } + bool getExportIgnoreAttr(const Input & input) const { return maybeGetBoolAttr(input.attrs, "exportIgnore").value_or(false); @@ -678,7 +685,8 @@ struct GitInputScheme : InputScheme verifyCommit(input, repo); bool exportIgnore = getExportIgnoreAttr(input); - auto accessor = repo->getAccessor(rev, exportIgnore, "«" + input.to_string() + "»"); + bool smudgeLfs = getLfsAttr(input); + auto accessor = repo->getAccessor(rev, exportIgnore, "«" + input.to_string() + "»", smudgeLfs); /* If the repo has submodules, fetch them and return a mounted input accessor consisting of the accessor for the top-level @@ -698,6 +706,7 @@ struct GitInputScheme : InputScheme attrs.insert_or_assign("rev", submoduleRev.gitRev()); attrs.insert_or_assign("exportIgnore", Explicit{ exportIgnore }); attrs.insert_or_assign("submodules", Explicit{ true }); + attrs.insert_or_assign("lfs", Explicit{ smudgeLfs }); attrs.insert_or_assign("allRefs", Explicit{ true }); auto submoduleInput = fetchers::Input::fromAttrs(*input.settings, std::move(attrs)); auto [submoduleAccessor, submoduleInput2] = @@ -838,7 +847,7 @@ struct GitInputScheme : InputScheme { auto makeFingerprint = [&](const Hash & rev) { - return rev.gitRev() + (getSubmodulesAttr(input) ? ";s" : "") + (getExportIgnoreAttr(input) ? ";e" : ""); + return rev.gitRev() + (getSubmodulesAttr(input) ? ";s" : "") + (getExportIgnoreAttr(input) ? ";e" : "") + (getLfsAttr(input) ? 
";l" : ""); }; if (auto rev = input.getRev()) diff --git a/src/libfetchers/meson.build b/src/libfetchers/meson.build index 58afbb7d0..725254b56 100644 --- a/src/libfetchers/meson.build +++ b/src/libfetchers/meson.build @@ -14,7 +14,7 @@ cxx = meson.get_compiler('cpp') subdir('nix-meson-build-support/deps-lists') -configdata = configuration_data() +configuration_data() deps_private_maybe_subproject = [ ] @@ -48,12 +48,12 @@ sources = files( 'fetch-to-store.cc', 'fetchers.cc', 'filtering-source-accessor.cc', + 'git-lfs-fetch.cc', 'git-utils.cc', 'git.cc', 'github.cc', 'indirect.cc', 'mercurial.cc', - 'mounted-source-accessor.cc', 'path.cc', 'registry.cc', 'store-path-accessor.cc', @@ -69,8 +69,8 @@ headers = files( 'fetch-to-store.hh', 'fetchers.hh', 'filtering-source-accessor.hh', + 'git-lfs-fetch.hh', 'git-utils.hh', - 'mounted-source-accessor.hh', 'registry.hh', 'store-path-accessor.hh', 'tarball.hh', diff --git a/src/libfetchers/mounted-source-accessor.hh b/src/libfetchers/mounted-source-accessor.hh deleted file mode 100644 index 45cbcb09a..000000000 --- a/src/libfetchers/mounted-source-accessor.hh +++ /dev/null @@ -1,9 +0,0 @@ -#pragma once - -#include "source-accessor.hh" - -namespace nix { - -ref makeMountedSourceAccessor(std::map> mounts); - -} diff --git a/src/libfetchers/package.nix b/src/libfetchers/package.nix index b0aecd049..3f52e9878 100644 --- a/src/libfetchers/package.nix +++ b/src/libfetchers/package.nix @@ -41,14 +41,6 @@ mkMesonLibrary (finalAttrs: { nlohmann_json ]; - preConfigure = - # "Inline" .version so it's not a symlink, and includes the suffix. - # Do the meson utils, without modification. - '' - chmod u+w ./.version - echo ${version} > ../../.version - ''; - meta = { platforms = lib.platforms.unix ++ lib.platforms.windows; }; diff --git a/src/libflake-c/package.nix b/src/libflake-c/package.nix index f0615a427..114950852 100644 --- a/src/libflake-c/package.nix +++ b/src/libflake-c/package.nix @@ -38,14 +38,6 @@ mkMesonLibrary (finalAttrs: { nix-flake ]; - preConfigure = - # "Inline" .version so it's not a symlink, and includes the suffix. - # Do the meson utils, without modification. - '' - chmod u+w ./.version - echo ${version} > ../../.version - ''; - mesonFlags = [ ]; diff --git a/src/libflake-tests/package.nix b/src/libflake-tests/package.nix index f9d9b0bc0..714f3791a 100644 --- a/src/libflake-tests/package.nix +++ b/src/libflake-tests/package.nix @@ -46,14 +46,6 @@ mkMesonExecutable (finalAttrs: { gtest ]; - preConfigure = - # "Inline" .version so it's not a symlink, and includes the suffix. - # Do the meson utils, without modification. 
- '' - chmod u+w ./.version - echo ${version} > ../../.version - ''; - mesonFlags = [ ]; diff --git a/src/libflake/flake/flake.cc b/src/libflake/flake/flake.cc index 443ad9a2a..e573c55c4 100644 --- a/src/libflake/flake/flake.cc +++ b/src/libflake/flake/flake.cc @@ -337,7 +337,7 @@ static Flake readFlake( auto storePath = fetchToStore(*state.store, setting.value->path(), FetchMode::Copy); flake.config.settings.emplace( state.symbols[setting.name], - state.store->toRealPath(storePath)); + state.store->printStorePath(storePath)); } else if (setting.value->type() == nInt) flake.config.settings.emplace( @@ -381,7 +381,7 @@ static FlakeRef applySelfAttrs( { auto newRef(ref); - std::set allowedAttrs{"submodules"}; + std::set allowedAttrs{"submodules", "lfs"}; for (auto & attr : flake.selfAttrs) { if (!allowedAttrs.contains(attr.first)) @@ -423,7 +423,7 @@ static Flake getFlake( auto storePath = copyInputToStore(state, lockedRef.input, originalRef.input, accessor); // Re-parse flake.nix from the store. - return readFlake(state, originalRef, resolvedRef, lockedRef, state.rootPath(state.store->toRealPath(storePath)), lockRootAttrPath); + return readFlake(state, originalRef, resolvedRef, lockedRef, state.storePath(storePath), lockRootAttrPath); } Flake getFlake(EvalState & state, const FlakeRef & originalRef, bool useRegistries) @@ -784,7 +784,7 @@ LockedFlake lockFlake( // FIXME: allow input to be lazy. auto storePath = copyInputToStore(state, lockedRef.input, input.ref->input, accessor); - return {state.rootPath(state.store->toRealPath(storePath)), lockedRef}; + return {state.storePath(storePath), lockedRef}; } }(); @@ -921,21 +921,6 @@ LockedFlake lockFlake( } } -std::pair sourcePathToStorePath( - ref store, - const SourcePath & _path) -{ - auto path = _path.path.abs(); - - if (auto store2 = store.dynamic_pointer_cast()) { - auto realStoreDir = store2->getRealStoreDir(); - if (isInDir(path, realStoreDir)) - path = store2->storeDir + path.substr(realStoreDir.size()); - } - - return store->toStorePath(path); -} - void callFlake(EvalState & state, const LockedFlake & lockedFlake, Value & vRes) @@ -953,7 +938,7 @@ void callFlake(EvalState & state, auto lockedNode = node.dynamic_pointer_cast(); - auto [storePath, subdir] = sourcePathToStorePath(state.store, sourcePath); + auto [storePath, subdir] = state.store->toStorePath(sourcePath.path.abs()); emitTreeAttrs( state, diff --git a/src/libflake/flake/flake.hh b/src/libflake/flake/flake.hh index 8d9b9a698..d8cd9aac0 100644 --- a/src/libflake/flake/flake.hh +++ b/src/libflake/flake/flake.hh @@ -234,16 +234,6 @@ void callFlake( const LockedFlake & lockedFlake, Value & v); -/** - * Map a `SourcePath` to the corresponding store path. This is a - * temporary hack to support chroot stores while we don't have full - * lazy trees. FIXME: Remove this once we can pass a sourcePath rather - * than a storePath to call-flake.nix. - */ -std::pair sourcePathToStorePath( - ref store, - const SourcePath & path); - } void emitTreeAttrs( diff --git a/src/libflake/package.nix b/src/libflake/package.nix index ebd38e140..5240ce5e3 100644 --- a/src/libflake/package.nix +++ b/src/libflake/package.nix @@ -40,14 +40,6 @@ mkMesonLibrary (finalAttrs: { nlohmann_json ]; - preConfigure = - # "Inline" .version so it's not a symlink, and includes the suffix. - # Do the meson utils, without modification. 
- '' - chmod u+w ./.version - echo ${version} > ../../.version - ''; - meta = { platforms = lib.platforms.unix ++ lib.platforms.windows; }; diff --git a/src/libmain-c/package.nix b/src/libmain-c/package.nix index cf710e03b..f019a917d 100644 --- a/src/libmain-c/package.nix +++ b/src/libmain-c/package.nix @@ -40,14 +40,6 @@ mkMesonLibrary (finalAttrs: { nix-main ]; - preConfigure = - # "Inline" .version so it's not a symlink, and includes the suffix. - # Do the meson utils, without modification. - '' - chmod u+w ./.version - echo ${version} > ../../.version - ''; - mesonFlags = [ ]; diff --git a/src/libmain/loggers.cc b/src/libmain/loggers.cc index a4e0530c8..836ea3dc8 100644 --- a/src/libmain/loggers.cc +++ b/src/libmain/loggers.cc @@ -6,7 +6,8 @@ namespace nix { LogFormat defaultLogFormat = LogFormat::raw; -LogFormat parseLogFormat(const std::string & logFormatStr) { +LogFormat parseLogFormat(const std::string & logFormatStr) +{ if (logFormatStr == "raw" || getEnv("NIX_GET_COMPLETIONS")) return LogFormat::raw; else if (logFormatStr == "raw-with-logs") @@ -20,14 +21,15 @@ LogFormat parseLogFormat(const std::string & logFormatStr) { throw Error("option 'log-format' has an invalid value '%s'", logFormatStr); } -Logger * makeDefaultLogger() { +std::unique_ptr makeDefaultLogger() +{ switch (defaultLogFormat) { case LogFormat::raw: return makeSimpleLogger(false); case LogFormat::rawWithLogs: return makeSimpleLogger(true); case LogFormat::internalJSON: - return makeJSONLogger(*makeSimpleLogger(true)); + return makeJSONLogger(getStandardError()); case LogFormat::bar: return makeProgressBar(); case LogFormat::barWithLogs: { @@ -40,16 +42,19 @@ Logger * makeDefaultLogger() { } } -void setLogFormat(const std::string & logFormatStr) { +void setLogFormat(const std::string & logFormatStr) +{ setLogFormat(parseLogFormat(logFormatStr)); } -void setLogFormat(const LogFormat & logFormat) { +void setLogFormat(const LogFormat & logFormat) +{ defaultLogFormat = logFormat; createDefaultLogger(); } -void createDefaultLogger() { +void createDefaultLogger() +{ logger = makeDefaultLogger(); } diff --git a/src/libmain/package.nix b/src/libmain/package.nix index 046b505df..c03697c48 100644 --- a/src/libmain/package.nix +++ b/src/libmain/package.nix @@ -37,14 +37,6 @@ mkMesonLibrary (finalAttrs: { openssl ]; - preConfigure = - # "Inline" .version so it's not a symlink, and includes the suffix. - # Do the meson utils, without modification. 
- '' - chmod u+w ./.version - echo ${version} > ../../.version - ''; - meta = { platforms = lib.platforms.unix ++ lib.platforms.windows; }; diff --git a/src/libmain/progress-bar.cc b/src/libmain/progress-bar.cc index 961850b58..17109b57e 100644 --- a/src/libmain/progress-bar.cc +++ b/src/libmain/progress-bar.cc @@ -117,13 +117,15 @@ public: { { auto state(state_.lock()); - if (!state->active) return; - state->active = false; - writeToStderr("\r\e[K"); - updateCV.notify_one(); - quitCV.notify_one(); + if (state->active) { + state->active = false; + writeToStderr("\r\e[K"); + updateCV.notify_one(); + quitCV.notify_one(); + } } - updateThread.join(); + if (updateThread.joinable()) + updateThread.join(); } void pause() override { @@ -553,9 +555,9 @@ public: } }; -Logger * makeProgressBar() +std::unique_ptr makeProgressBar() { - return new ProgressBar(isTTY()); + return std::make_unique(isTTY()); } void startProgressBar() @@ -565,9 +567,8 @@ void startProgressBar() void stopProgressBar() { - auto progressBar = dynamic_cast(logger); - if (progressBar) progressBar->stop(); - + if (auto progressBar = dynamic_cast(logger.get())) + progressBar->stop(); } } diff --git a/src/libmain/progress-bar.hh b/src/libmain/progress-bar.hh index c3c6e3833..83209e863 100644 --- a/src/libmain/progress-bar.hh +++ b/src/libmain/progress-bar.hh @@ -5,7 +5,7 @@ namespace nix { -Logger * makeProgressBar(); +std::unique_ptr makeProgressBar(); void startProgressBar(); diff --git a/src/libmain/shared.cc b/src/libmain/shared.cc index 50f90bfb3..30e76c349 100644 --- a/src/libmain/shared.cc +++ b/src/libmain/shared.cc @@ -315,20 +315,6 @@ void printVersion(const std::string & programName) throw Exit(); } - -void showManPage(const std::string & name) -{ - restoreProcessContext(); - setEnv("MANPATH", settings.nixManDir.c_str()); - execlp("man", "man", name.c_str(), nullptr); - if (errno == ENOENT) { - // Not SysError because we don't want to suffix the errno, aka No such file or directory. - throw Error("The '%1%' command was not found, but it is needed for '%2%' and some other '%3%' commands' help text. Perhaps you could install the '%1%' command?", "man", name.c_str(), "nix-*"); - } - throw SysError("command 'man %1%' failed", name.c_str()); -} - - int handleExceptions(const std::string & programName, std::function fun) { ReceiveInterrupts receiveInterrupts; // FIXME: need better place for this diff --git a/src/libmain/shared.hh b/src/libmain/shared.hh index 712b404d3..a6a18ceb0 100644 --- a/src/libmain/shared.hh +++ b/src/libmain/shared.hh @@ -70,11 +70,6 @@ struct LegacyArgs : public MixCommonArgs, public RootArgs }; -/** - * Show the manual page for the specified program. - */ -void showManPage(const std::string & name); - /** * The constructor of this class starts a pager if standard output is a * terminal and $PAGER is set. Standard output is redirected to the diff --git a/src/libstore-c/package.nix b/src/libstore-c/package.nix index 89abeaab8..fde17c78e 100644 --- a/src/libstore-c/package.nix +++ b/src/libstore-c/package.nix @@ -36,14 +36,6 @@ mkMesonLibrary (finalAttrs: { nix-store ]; - preConfigure = - # "Inline" .version so it's not a symlink, and includes the suffix. - # Do the meson utils, without modification. 
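The logger hunks above move `makeDefaultLogger`, `makeProgressBar` and related factories from raw `Logger *` to `std::unique_ptr<Logger>`. A small sketch of what that means for call sites, under the assumption (suggested by the `daemon.cc` hunk later in this diff) that the global `logger` itself is now a `std::unique_ptr<Logger>`:

```cpp
// Sketch, assuming the global `logger` is std::unique_ptr<Logger> after this
// change, and that makeSimpleLogger() follows the same ownership convention.
// Replacing the logger destroys the previous one instead of leaking it.
logger = makeProgressBar();                    // returns std::unique_ptr<Logger>
// ...
if (auto bar = dynamic_cast<ProgressBar *>(logger.get()))
    bar->stop();                               // what stopProgressBar() does above
logger = makeSimpleLogger(true);               // old ProgressBar destroyed here
```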
- '' - chmod u+w ./.version - echo ${version} > ../../.version - ''; - mesonFlags = [ ]; diff --git a/src/libstore-test-support/package.nix b/src/libstore-test-support/package.nix index 7cc29795c..ccac25ee1 100644 --- a/src/libstore-test-support/package.nix +++ b/src/libstore-test-support/package.nix @@ -40,14 +40,6 @@ mkMesonLibrary (finalAttrs: { rapidcheck ]; - preConfigure = - # "Inline" .version so it's not a symlink, and includes the suffix. - # Do the meson utils, without modification. - '' - chmod u+w ./.version - echo ${version} > ../../.version - ''; - mesonFlags = [ ]; diff --git a/src/libstore-tests/derivation-advanced-attrs.cc b/src/libstore-tests/derivation-advanced-attrs.cc index 9d2c64ef3..107cf13e3 100644 --- a/src/libstore-tests/derivation-advanced-attrs.cc +++ b/src/libstore-tests/derivation-advanced-attrs.cc @@ -3,13 +3,15 @@ #include "experimental-features.hh" #include "derivations.hh" - -#include "tests/libstore.hh" -#include "tests/characterization.hh" +#include "derivations.hh" +#include "derivation-options.hh" #include "parsed-derivations.hh" #include "types.hh" #include "json-utils.hh" +#include "tests/libstore.hh" +#include "tests/characterization.hh" + namespace nix { using nlohmann::json; @@ -80,21 +82,30 @@ TEST_F(DerivationAdvancedAttrsTest, Derivation_advancedAttributes_defaults) auto drvPath = writeDerivation(*store, got, NoRepair, true); ParsedDerivation parsedDrv(drvPath, got); + DerivationOptions options = DerivationOptions::fromParsedDerivation(parsedDrv); - EXPECT_EQ(parsedDrv.getStringAttr("__sandboxProfile").value_or(""), ""); - EXPECT_EQ(parsedDrv.getBoolAttr("__noChroot"), false); - EXPECT_EQ(parsedDrv.getStringsAttr("__impureHostDeps").value_or(Strings()), Strings()); - EXPECT_EQ(parsedDrv.getStringsAttr("impureEnvVars").value_or(Strings()), Strings()); - EXPECT_EQ(parsedDrv.getBoolAttr("__darwinAllowLocalNetworking"), false); - EXPECT_EQ(parsedDrv.getStringsAttr("allowedReferences"), std::nullopt); - EXPECT_EQ(parsedDrv.getStringsAttr("allowedRequisites"), std::nullopt); - EXPECT_EQ(parsedDrv.getStringsAttr("disallowedReferences"), std::nullopt); - EXPECT_EQ(parsedDrv.getStringsAttr("disallowedRequisites"), std::nullopt); - EXPECT_EQ(parsedDrv.getRequiredSystemFeatures(), StringSet()); - EXPECT_EQ(parsedDrv.canBuildLocally(*store), false); - EXPECT_EQ(parsedDrv.willBuildLocally(*store), false); - EXPECT_EQ(parsedDrv.substitutesAllowed(), true); - EXPECT_EQ(parsedDrv.useUidRange(), false); + EXPECT_TRUE(!parsedDrv.hasStructuredAttrs()); + + EXPECT_EQ(options.additionalSandboxProfile, ""); + EXPECT_EQ(options.noChroot, false); + EXPECT_EQ(options.impureHostDeps, StringSet{}); + EXPECT_EQ(options.impureEnvVars, StringSet{}); + EXPECT_EQ(options.allowLocalNetworking, false); + { + auto * checksForAllOutputs_ = std::get_if<0>(&options.outputChecks); + ASSERT_TRUE(checksForAllOutputs_ != nullptr); + auto & checksForAllOutputs = *checksForAllOutputs_; + + EXPECT_EQ(checksForAllOutputs.allowedReferences, std::nullopt); + EXPECT_EQ(checksForAllOutputs.allowedRequisites, std::nullopt); + EXPECT_EQ(checksForAllOutputs.disallowedReferences, StringSet{}); + EXPECT_EQ(checksForAllOutputs.disallowedRequisites, StringSet{}); + } + EXPECT_EQ(options.getRequiredSystemFeatures(got), StringSet()); + EXPECT_EQ(options.canBuildLocally(*store, got), false); + EXPECT_EQ(options.willBuildLocally(*store, got), false); + EXPECT_EQ(options.substitutesAllowed(), true); + EXPECT_EQ(options.useUidRange(got), false); }); }; @@ -106,29 +117,36 @@ 
TEST_F(DerivationAdvancedAttrsTest, Derivation_advancedAttributes) auto drvPath = writeDerivation(*store, got, NoRepair, true); ParsedDerivation parsedDrv(drvPath, got); + DerivationOptions options = DerivationOptions::fromParsedDerivation(parsedDrv); StringSet systemFeatures{"rainbow", "uid-range"}; - EXPECT_EQ(parsedDrv.getStringAttr("__sandboxProfile").value_or(""), "sandcastle"); - EXPECT_EQ(parsedDrv.getBoolAttr("__noChroot"), true); - EXPECT_EQ(parsedDrv.getStringsAttr("__impureHostDeps").value_or(Strings()), Strings{"/usr/bin/ditto"}); - EXPECT_EQ(parsedDrv.getStringsAttr("impureEnvVars").value_or(Strings()), Strings{"UNICORN"}); - EXPECT_EQ(parsedDrv.getBoolAttr("__darwinAllowLocalNetworking"), true); - EXPECT_EQ( - parsedDrv.getStringsAttr("allowedReferences"), Strings{"/nix/store/3c08bzb71z4wiag719ipjxr277653ynp-foo"}); - EXPECT_EQ( - parsedDrv.getStringsAttr("allowedRequisites"), Strings{"/nix/store/3c08bzb71z4wiag719ipjxr277653ynp-foo"}); - EXPECT_EQ( - parsedDrv.getStringsAttr("disallowedReferences"), - Strings{"/nix/store/7rhsm8i393hm1wcsmph782awg1hi2f7x-bar"}); - EXPECT_EQ( - parsedDrv.getStringsAttr("disallowedRequisites"), - Strings{"/nix/store/7rhsm8i393hm1wcsmph782awg1hi2f7x-bar"}); - EXPECT_EQ(parsedDrv.getRequiredSystemFeatures(), systemFeatures); - EXPECT_EQ(parsedDrv.canBuildLocally(*store), false); - EXPECT_EQ(parsedDrv.willBuildLocally(*store), false); - EXPECT_EQ(parsedDrv.substitutesAllowed(), false); - EXPECT_EQ(parsedDrv.useUidRange(), true); + EXPECT_TRUE(!parsedDrv.hasStructuredAttrs()); + + EXPECT_EQ(options.additionalSandboxProfile, "sandcastle"); + EXPECT_EQ(options.noChroot, true); + EXPECT_EQ(options.impureHostDeps, StringSet{"/usr/bin/ditto"}); + EXPECT_EQ(options.impureEnvVars, StringSet{"UNICORN"}); + EXPECT_EQ(options.allowLocalNetworking, true); + { + auto * checksForAllOutputs_ = std::get_if<0>(&options.outputChecks); + ASSERT_TRUE(checksForAllOutputs_ != nullptr); + auto & checksForAllOutputs = *checksForAllOutputs_; + + EXPECT_EQ( + checksForAllOutputs.allowedReferences, StringSet{"/nix/store/3c08bzb71z4wiag719ipjxr277653ynp-foo"}); + EXPECT_EQ( + checksForAllOutputs.allowedRequisites, StringSet{"/nix/store/3c08bzb71z4wiag719ipjxr277653ynp-foo"}); + EXPECT_EQ( + checksForAllOutputs.disallowedReferences, StringSet{"/nix/store/7rhsm8i393hm1wcsmph782awg1hi2f7x-bar"}); + EXPECT_EQ( + checksForAllOutputs.disallowedRequisites, StringSet{"/nix/store/7rhsm8i393hm1wcsmph782awg1hi2f7x-bar"}); + } + EXPECT_EQ(options.getRequiredSystemFeatures(got), systemFeatures); + EXPECT_EQ(options.canBuildLocally(*store, got), false); + EXPECT_EQ(options.willBuildLocally(*store, got), false); + EXPECT_EQ(options.substitutesAllowed(), false); + EXPECT_EQ(options.useUidRange(got), true); }); }; @@ -140,27 +158,29 @@ TEST_F(DerivationAdvancedAttrsTest, Derivation_advancedAttributes_structuredAttr auto drvPath = writeDerivation(*store, got, NoRepair, true); ParsedDerivation parsedDrv(drvPath, got); + DerivationOptions options = DerivationOptions::fromParsedDerivation(parsedDrv); - EXPECT_EQ(parsedDrv.getStringAttr("__sandboxProfile").value_or(""), ""); - EXPECT_EQ(parsedDrv.getBoolAttr("__noChroot"), false); - EXPECT_EQ(parsedDrv.getStringsAttr("__impureHostDeps").value_or(Strings()), Strings()); - EXPECT_EQ(parsedDrv.getStringsAttr("impureEnvVars").value_or(Strings()), Strings()); - EXPECT_EQ(parsedDrv.getBoolAttr("__darwinAllowLocalNetworking"), false); + EXPECT_TRUE(parsedDrv.hasStructuredAttrs()); + + EXPECT_EQ(options.additionalSandboxProfile, ""); + 
EXPECT_EQ(options.noChroot, false); + EXPECT_EQ(options.impureHostDeps, StringSet{}); + EXPECT_EQ(options.impureEnvVars, StringSet{}); + EXPECT_EQ(options.allowLocalNetworking, false); { - auto structuredAttrs_ = parsedDrv.getStructuredAttrs(); - ASSERT_TRUE(structuredAttrs_); - auto & structuredAttrs = *structuredAttrs_; + auto * checksPerOutput_ = std::get_if<1>(&options.outputChecks); + ASSERT_TRUE(checksPerOutput_ != nullptr); + auto & checksPerOutput = *checksPerOutput_; - auto outputChecks_ = get(structuredAttrs, "outputChecks"); - ASSERT_FALSE(outputChecks_); + EXPECT_EQ(checksPerOutput.size(), 0); } - EXPECT_EQ(parsedDrv.getRequiredSystemFeatures(), StringSet()); - EXPECT_EQ(parsedDrv.canBuildLocally(*store), false); - EXPECT_EQ(parsedDrv.willBuildLocally(*store), false); - EXPECT_EQ(parsedDrv.substitutesAllowed(), true); - EXPECT_EQ(parsedDrv.useUidRange(), false); + EXPECT_EQ(options.getRequiredSystemFeatures(got), StringSet()); + EXPECT_EQ(options.canBuildLocally(*store, got), false); + EXPECT_EQ(options.willBuildLocally(*store, got), false); + EXPECT_EQ(options.substitutesAllowed(), true); + EXPECT_EQ(options.useUidRange(got), false); }); }; @@ -172,62 +192,52 @@ TEST_F(DerivationAdvancedAttrsTest, Derivation_advancedAttributes_structuredAttr auto drvPath = writeDerivation(*store, got, NoRepair, true); ParsedDerivation parsedDrv(drvPath, got); + DerivationOptions options = DerivationOptions::fromParsedDerivation(parsedDrv); StringSet systemFeatures{"rainbow", "uid-range"}; - EXPECT_EQ(parsedDrv.getStringAttr("__sandboxProfile").value_or(""), "sandcastle"); - EXPECT_EQ(parsedDrv.getBoolAttr("__noChroot"), true); - EXPECT_EQ(parsedDrv.getStringsAttr("__impureHostDeps").value_or(Strings()), Strings{"/usr/bin/ditto"}); - EXPECT_EQ(parsedDrv.getStringsAttr("impureEnvVars").value_or(Strings()), Strings{"UNICORN"}); - EXPECT_EQ(parsedDrv.getBoolAttr("__darwinAllowLocalNetworking"), true); + EXPECT_TRUE(parsedDrv.hasStructuredAttrs()); + + EXPECT_EQ(options.additionalSandboxProfile, "sandcastle"); + EXPECT_EQ(options.noChroot, true); + EXPECT_EQ(options.impureHostDeps, StringSet{"/usr/bin/ditto"}); + EXPECT_EQ(options.impureEnvVars, StringSet{"UNICORN"}); + EXPECT_EQ(options.allowLocalNetworking, true); { - auto structuredAttrs_ = parsedDrv.getStructuredAttrs(); - ASSERT_TRUE(structuredAttrs_); - auto & structuredAttrs = *structuredAttrs_; - - auto outputChecks_ = get(structuredAttrs, "outputChecks"); - ASSERT_TRUE(outputChecks_); - auto & outputChecks = *outputChecks_; - { - auto output_ = get(outputChecks, "out"); + auto output_ = get(std::get<1>(options.outputChecks), "out"); ASSERT_TRUE(output_); auto & output = *output_; - EXPECT_EQ( - get(output, "allowedReferences")->get(), - Strings{"/nix/store/3c08bzb71z4wiag719ipjxr277653ynp-foo"}); - EXPECT_EQ( - get(output, "allowedRequisites")->get(), - Strings{"/nix/store/3c08bzb71z4wiag719ipjxr277653ynp-foo"}); + + EXPECT_EQ(output.allowedReferences, StringSet{"/nix/store/3c08bzb71z4wiag719ipjxr277653ynp-foo"}); + EXPECT_EQ(output.allowedRequisites, StringSet{"/nix/store/3c08bzb71z4wiag719ipjxr277653ynp-foo"}); } { - auto output_ = get(outputChecks, "bin"); + auto output_ = get(std::get<1>(options.outputChecks), "bin"); ASSERT_TRUE(output_); auto & output = *output_; - EXPECT_EQ( - get(output, "disallowedReferences")->get(), - Strings{"/nix/store/7rhsm8i393hm1wcsmph782awg1hi2f7x-bar"}); - EXPECT_EQ( - get(output, "disallowedRequisites")->get(), - Strings{"/nix/store/7rhsm8i393hm1wcsmph782awg1hi2f7x-bar"}); + + 
EXPECT_EQ(output.disallowedReferences, StringSet{"/nix/store/7rhsm8i393hm1wcsmph782awg1hi2f7x-bar"}); + EXPECT_EQ(output.disallowedRequisites, StringSet{"/nix/store/7rhsm8i393hm1wcsmph782awg1hi2f7x-bar"}); } { - auto output_ = get(outputChecks, "dev"); + auto output_ = get(std::get<1>(options.outputChecks), "dev"); ASSERT_TRUE(output_); auto & output = *output_; - EXPECT_EQ(get(output, "maxSize")->get(), 789); - EXPECT_EQ(get(output, "maxClosureSize")->get(), 5909); + + EXPECT_EQ(output.maxSize, 789); + EXPECT_EQ(output.maxClosureSize, 5909); } } - EXPECT_EQ(parsedDrv.getRequiredSystemFeatures(), systemFeatures); - EXPECT_EQ(parsedDrv.canBuildLocally(*store), false); - EXPECT_EQ(parsedDrv.willBuildLocally(*store), false); - EXPECT_EQ(parsedDrv.substitutesAllowed(), false); - EXPECT_EQ(parsedDrv.useUidRange(), true); + EXPECT_EQ(options.getRequiredSystemFeatures(got), systemFeatures); + EXPECT_EQ(options.canBuildLocally(*store, got), false); + EXPECT_EQ(options.willBuildLocally(*store, got), false); + EXPECT_EQ(options.substitutesAllowed(), false); + EXPECT_EQ(options.useUidRange(got), true); }); }; diff --git a/src/libstore-tests/package.nix b/src/libstore-tests/package.nix index 670386c4a..b39ee7fa7 100644 --- a/src/libstore-tests/package.nix +++ b/src/libstore-tests/package.nix @@ -52,14 +52,6 @@ mkMesonExecutable (finalAttrs: { nix-store-test-support ]; - preConfigure = - # "Inline" .version so it's not a symlink, and includes the suffix. - # Do the meson utils, without modification. - '' - chmod u+w ./.version - echo ${version} > ../../.version - ''; - mesonFlags = [ ]; diff --git a/src/libstore/build/derivation-creation-and-realisation-goal.cc b/src/libstore/build/derivation-creation-and-realisation-goal.cc new file mode 100644 index 000000000..c33b7571f --- /dev/null +++ b/src/libstore/build/derivation-creation-and-realisation-goal.cc @@ -0,0 +1,126 @@ +#include "derivation-creation-and-realisation-goal.hh" +#include "worker.hh" + +namespace nix { + +DerivationCreationAndRealisationGoal::DerivationCreationAndRealisationGoal( + ref drvReq, const OutputsSpec & wantedOutputs, Worker & worker, BuildMode buildMode) + : Goal(worker, DerivedPath::Built{.drvPath = drvReq, .outputs = wantedOutputs}) + , drvReq(drvReq) + , wantedOutputs(wantedOutputs) + , buildMode(buildMode) +{ + name = + fmt("outer obtaining drv from '%s' and then building outputs %s", + drvReq->to_string(worker.store), + std::visit( + overloaded{ + [&](const OutputsSpec::All) -> std::string { return "* (all of them)"; }, + [&](const OutputsSpec::Names os) { return concatStringsSep(", ", quoteStrings(os)); }, + }, + wantedOutputs.raw)); + trace("created outer"); + + worker.updateProgress(); +} + +DerivationCreationAndRealisationGoal::~DerivationCreationAndRealisationGoal() {} + +static StorePath pathPartOfReq(const SingleDerivedPath & req) +{ + return std::visit( + overloaded{ + [&](const SingleDerivedPath::Opaque & bo) { return bo.path; }, + [&](const SingleDerivedPath::Built & bfd) { return pathPartOfReq(*bfd.drvPath); }, + }, + req.raw()); +} + +std::string DerivationCreationAndRealisationGoal::key() +{ + /* Ensure that derivations get built in order of their name, + i.e. a derivation named "aardvark" always comes before "baboon". And + substitution goals and inner derivation goals always happen before + derivation goals (due to "b$"). 
*/ + return "c$" + std::string(pathPartOfReq(*drvReq).name()) + "$" + drvReq->to_string(worker.store); +} + +void DerivationCreationAndRealisationGoal::timedOut(Error && ex) {} + +void DerivationCreationAndRealisationGoal::addWantedOutputs(const OutputsSpec & outputs) +{ + /* If we already want all outputs, there is nothing to do. */ + auto newWanted = wantedOutputs.union_(outputs); + bool needRestart = !newWanted.isSubsetOf(wantedOutputs); + wantedOutputs = newWanted; + + if (!needRestart) + return; + + if (!optDrvPath) + // haven't started steps where the outputs matter yet + return; + worker.makeDerivationGoal(*optDrvPath, outputs, buildMode); +} + +Goal::Co DerivationCreationAndRealisationGoal::init() +{ + trace("outer init"); + + /* The first thing to do is to make sure that the derivation + exists. If it doesn't, it may be created through a + substitute. */ + if (auto optDrvPath = [this]() -> std::optional { + if (buildMode != bmNormal) + return std::nullopt; + + auto drvPath = StorePath::dummy; + try { + drvPath = resolveDerivedPath(worker.store, *drvReq); + } catch (MissingRealisation &) { + return std::nullopt; + } + auto cond = worker.evalStore.isValidPath(drvPath) || worker.store.isValidPath(drvPath); + return cond ? std::optional{drvPath} : std::nullopt; + }()) { + trace( + fmt("already have drv '%s' for '%s', can go straight to building", + worker.store.printStorePath(*optDrvPath), + drvReq->to_string(worker.store))); + } else { + trace("need to obtain drv we want to build"); + addWaitee(worker.makeGoal(DerivedPath::fromSingle(*drvReq))); + co_await Suspend{}; + } + + trace("outer load and build derivation"); + + if (nrFailed != 0) { + co_return amDone(ecFailed, Error("cannot build missing derivation '%s'", drvReq->to_string(worker.store))); + } + + StorePath drvPath = resolveDerivedPath(worker.store, *drvReq); + /* Build this step! */ + concreteDrvGoal = worker.makeDerivationGoal(drvPath, wantedOutputs, buildMode); + { + auto g = upcast_goal(concreteDrvGoal); + /* We will finish with it ourselves, as if we were the derivational goal. */ + g->preserveException = true; + } + optDrvPath = std::move(drvPath); + addWaitee(upcast_goal(concreteDrvGoal)); + co_await Suspend{}; + + trace("outer build done"); + + buildResult = upcast_goal(concreteDrvGoal) + ->getBuildResult(DerivedPath::Built{ + .drvPath = drvReq, + .outputs = wantedOutputs, + }); + + auto g = upcast_goal(concreteDrvGoal); + co_return amDone(g->exitCode, g->ex); +} + +} diff --git a/src/libstore/build/derivation-creation-and-realisation-goal.hh b/src/libstore/build/derivation-creation-and-realisation-goal.hh new file mode 100644 index 000000000..40fe40053 --- /dev/null +++ b/src/libstore/build/derivation-creation-and-realisation-goal.hh @@ -0,0 +1,88 @@ +#pragma once + +#include "parsed-derivations.hh" +#include "store-api.hh" +#include "pathlocks.hh" +#include "goal.hh" + +namespace nix { + +struct DerivationGoal; + +/** + * This goal type is essentially the serial composition (like function + * composition) of a goal for getting a derivation, and then a + * `DerivationGoal` using the newly-obtained derivation. + * + * In the (currently experimental) general inductive case of derivations + * that are themselves build outputs, that first goal will be *another* + * `DerivationCreationAndRealisationGoal`. In the (much more common) base-case + * where the derivation has no provence and is just referred to by + * (content-addressed) store path, that first goal is a + * `SubstitutionGoal`. 
+ * + * If we already have the derivation (e.g. if the evaluator has created + * the derivation locally and then instructured the store to build it), + * we can skip the first goal entirely as a small optimization. + */ +struct DerivationCreationAndRealisationGoal : public Goal +{ + /** + * How to obtain a store path of the derivation to build. + */ + ref drvReq; + + /** + * The path of the derivation, once obtained. + **/ + std::optional optDrvPath; + + /** + * The goal for the corresponding concrete derivation. + **/ + std::shared_ptr concreteDrvGoal; + + /** + * The specific outputs that we need to build. + */ + OutputsSpec wantedOutputs; + + /** + * The final output paths of the build. + * + * - For input-addressed derivations, always the precomputed paths + * + * - For content-addressed derivations, calcuated from whatever the + * hash ends up being. (Note that fixed outputs derivations that + * produce the "wrong" output still install that data under its + * true content-address.) + */ + OutputPathMap finalOutputs; + + BuildMode buildMode; + + DerivationCreationAndRealisationGoal( + ref drvReq, + const OutputsSpec & wantedOutputs, + Worker & worker, + BuildMode buildMode = bmNormal); + virtual ~DerivationCreationAndRealisationGoal(); + + void timedOut(Error && ex) override; + + std::string key() override; + + /** + * Add wanted outputs to an already existing derivation goal. + */ + void addWantedOutputs(const OutputsSpec & outputs); + + Co init() override; + + JobCategory jobCategory() const override + { + return JobCategory::Administration; + }; +}; + +} diff --git a/src/libstore/build/derivation-goal.cc b/src/libstore/build/derivation-goal.cc index a167d9261..41762cde1 100644 --- a/src/libstore/build/derivation-goal.cc +++ b/src/libstore/build/derivation-goal.cc @@ -137,21 +137,8 @@ Goal::Co DerivationGoal::init() { trace("init"); if (useDerivation) { - /* The first thing to do is to make sure that the derivation - exists. If it doesn't, it may be created through a - substitute. */ - - if (buildMode != bmNormal || !worker.evalStore.isValidPath(drvPath)) { - addWaitee(upcast_goal(worker.makePathSubstitutionGoal(drvPath))); - co_await Suspend{}; - } - trace("loading derivation"); - if (nrFailed != 0) { - co_return done(BuildResult::MiscFailure, {}, Error("cannot build missing derivation '%s'", worker.store.printStorePath(drvPath))); - } - /* `drvPath' should already be a root, but let's be on the safe side: if the user forgot to make it a root, we wouldn't want things being garbage collected while we're busy. */ @@ -181,6 +168,7 @@ Goal::Co DerivationGoal::haveDerivation() trace("have derivation"); parsedDrv = std::make_unique(drvPath, *drv); + drvOptions = std::make_unique(DerivationOptions::fromParsedDerivation(*parsedDrv)); if (!drv->type().hasKnownOutputPaths()) experimentalFeatureSettings.require(Xp::CaDerivations); @@ -237,7 +225,7 @@ Goal::Co DerivationGoal::haveDerivation() /* We are first going to try to create the invalid output paths through substitutes. If that doesn't work, we'll build them. */ - if (settings.useSubstitutes && parsedDrv->substitutesAllowed()) + if (settings.useSubstitutes && drvOptions->substitutesAllowed()) for (auto & [outputName, status] : initialOutputs) { if (!status.wanted) continue; if (!status.known) @@ -627,7 +615,7 @@ Goal::Co DerivationGoal::tryToBuild() `preferLocalBuild' set. Also, check and repair modes are only supported for local builds. 
*/ bool buildLocally = - (buildMode != bmNormal || parsedDrv->willBuildLocally(worker.store)) + (buildMode != bmNormal || drvOptions->willBuildLocally(worker.store, *drv)) && settings.maxBuildJobs.get() != 0; if (!buildLocally) { @@ -1123,7 +1111,7 @@ HookReply DerivationGoal::tryBuildHook() << (worker.getNrLocalBuilds() < settings.maxBuildJobs ? 1 : 0) << drv->platform << worker.store.printStorePath(drvPath) - << parsedDrv->getRequiredSystemFeatures(); + << drvOptions->getRequiredSystemFeatures(*drv); worker.hook->sink.flush(); /* Read the first line of input, which should be a word indicating @@ -1552,23 +1540,24 @@ void DerivationGoal::waiteeDone(GoalPtr waitee, ExitCode result) if (!useDerivation || !drv) return; auto & fullDrv = *dynamic_cast(drv.get()); - auto * dg = dynamic_cast(&*waitee); - if (!dg) return; + std::optional info = tryGetConcreteDrvGoal(waitee); + if (!info) return; + const auto & [dg, drvReq] = *info; - auto * nodeP = fullDrv.inputDrvs.findSlot(DerivedPath::Opaque { .path = dg->drvPath }); + auto * nodeP = fullDrv.inputDrvs.findSlot(drvReq.get()); if (!nodeP) return; auto & outputs = nodeP->value; for (auto & outputName : outputs) { - auto buildResult = dg->getBuildResult(DerivedPath::Built { - .drvPath = makeConstantStorePathRef(dg->drvPath), + auto buildResult = dg.get().getBuildResult(DerivedPath::Built { + .drvPath = makeConstantStorePathRef(dg.get().drvPath), .outputs = OutputsSpec::Names { outputName }, }); if (buildResult.success()) { auto i = buildResult.builtOutputs.find(outputName); if (i != buildResult.builtOutputs.end()) inputDrvOutputs.insert_or_assign( - { dg->drvPath, outputName }, + { dg.get().drvPath, outputName }, i->second.outPath); } } diff --git a/src/libstore/build/derivation-goal.hh b/src/libstore/build/derivation-goal.hh index 4e9c14519..3ff34509a 100644 --- a/src/libstore/build/derivation-goal.hh +++ b/src/libstore/build/derivation-goal.hh @@ -2,6 +2,7 @@ ///@file #include "parsed-derivations.hh" +#include "derivation-options.hh" #ifndef _WIN32 # include "user-lock.hh" #endif @@ -56,6 +57,10 @@ struct InitialOutput { /** * A goal for building some or all of the outputs of a derivation. + * + * The derivation must already be present, either in the store in a drv + * or in memory. If the derivation itself needs to be gotten first, a + * `DerivationCreationAndRealisationGoal` goal must be used instead. */ struct DerivationGoal : public Goal { @@ -143,6 +148,7 @@ struct DerivationGoal : public Goal std::unique_ptr drv; std::unique_ptr parsedDrv; + std::unique_ptr drvOptions; /** * The remainder is state held during the build. 
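The `derivation-goal.cc`/`derivation-goal.hh` hunks above replace ad-hoc `ParsedDerivation` queries with a `DerivationOptions` object parsed once in `haveDerivation()`. A condensed sketch of the new pattern, using placeholder `drvPath`, `drv` and `store` (not a standalone program):

```cpp
// Condensed from the call sites changed above.
ParsedDerivation parsedDrv(drvPath, drv);
auto options = DerivationOptions::fromParsedDerivation(parsedDrv);

if (settings.useSubstitutes && options.substitutesAllowed())
    ; // try to substitute the wanted outputs before building

bool buildLocally =
    options.willBuildLocally(store, drv)            // preferLocalBuild && canBuildLocally
    && settings.maxBuildJobs.get() != 0;

auto features = options.getRequiredSystemFeatures(drv);  // adds "ca-derivations" when needed
```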
diff --git a/src/libstore/build/entry-points.cc b/src/libstore/build/entry-points.cc index 3bf22320e..a473daff9 100644 --- a/src/libstore/build/entry-points.cc +++ b/src/libstore/build/entry-points.cc @@ -1,6 +1,7 @@ #include "worker.hh" #include "substitution-goal.hh" #ifndef _WIN32 // TODO Enable building on Windows +# include "derivation-creation-and-realisation-goal.hh" # include "derivation-goal.hh" #endif #include "local-store.hh" @@ -29,8 +30,8 @@ void Store::buildPaths(const std::vector & reqs, BuildMode buildMod } if (i->exitCode != Goal::ecSuccess) { #ifndef _WIN32 // TODO Enable building on Windows - if (auto i2 = dynamic_cast(i.get())) - failed.insert(printStorePath(i2->drvPath)); + if (auto i2 = dynamic_cast(i.get())) + failed.insert(i2->drvReq->to_string(*this)); else #endif if (auto i2 = dynamic_cast(i.get())) diff --git a/src/libstore/build/goal.cc b/src/libstore/build/goal.cc index 9a16da145..c381e5b58 100644 --- a/src/libstore/build/goal.cc +++ b/src/libstore/build/goal.cc @@ -175,7 +175,7 @@ Goal::Done Goal::amDone(ExitCode result, std::optional ex) exitCode = result; if (ex) { - if (!waiters.empty()) + if (!preserveException && !waiters.empty()) logError(ex->info()); else this->ex = std::move(*ex); diff --git a/src/libstore/build/goal.hh b/src/libstore/build/goal.hh index 1dd7ed525..2db1098b7 100644 --- a/src/libstore/build/goal.hh +++ b/src/libstore/build/goal.hh @@ -50,6 +50,16 @@ enum struct JobCategory { * A substitution an arbitrary store object; it will use network resources. */ Substitution, + /** + * A goal that does no "real" work by itself, and just exists to depend on + * other goals which *do* do real work. These goals therefore are not + * limited. + * + * These goals cannot infinitely create themselves, so there is no risk of + * a "fork bomb" type situation (which would be a problem even though the + * goal do no real work) either. + */ + Administration, }; struct Goal : public std::enable_shared_from_this @@ -373,6 +383,17 @@ public: */ BuildResult getBuildResult(const DerivedPath &) const; + /** + * Hack to say that this goal should not log `ex`, but instead keep + * it around. Set by a waitee which sees itself as the designated + * continuation of this goal, responsible for reporting its + * successes or failures. + * + * @todo this is yet another not-nice hack in the goal system that + * we ought to get rid of. See #11927 + */ + bool preserveException = false; + /** * Exception containing an error message, if any. 
*/ diff --git a/src/libstore/build/worker.cc b/src/libstore/build/worker.cc index dbe86f43f..b765fc2a0 100644 --- a/src/libstore/build/worker.cc +++ b/src/libstore/build/worker.cc @@ -4,6 +4,7 @@ #include "substitution-goal.hh" #include "drv-output-substitution-goal.hh" #include "derivation-goal.hh" +#include "derivation-creation-and-realisation-goal.hh" #ifndef _WIN32 // TODO Enable building on Windows # include "local-derivation-goal.hh" # include "hook-instance.hh" @@ -43,6 +44,24 @@ Worker::~Worker() } +std::shared_ptr Worker::makeDerivationCreationAndRealisationGoal( + ref drvReq, + const OutputsSpec & wantedOutputs, + BuildMode buildMode) +{ + std::weak_ptr & goal_weak = outerDerivationGoals.ensureSlot(*drvReq).value; + std::shared_ptr goal = goal_weak.lock(); + if (!goal) { + goal = std::make_shared(drvReq, wantedOutputs, *this, buildMode); + goal_weak = goal; + wakeUp(goal); + } else { + goal->addWantedOutputs(wantedOutputs); + } + return goal; +} + + std::shared_ptr Worker::makeDerivationGoalCommon( const StorePath & drvPath, const OutputsSpec & wantedOutputs, @@ -120,10 +139,7 @@ GoalPtr Worker::makeGoal(const DerivedPath & req, BuildMode buildMode) { return std::visit(overloaded { [&](const DerivedPath::Built & bfd) -> GoalPtr { - if (auto bop = std::get_if(&*bfd.drvPath)) - return makeDerivationGoal(bop->path, bfd.outputs, buildMode); - else - throw UnimplementedError("Building dynamic derivations in one shot is not yet implemented."); + return makeDerivationCreationAndRealisationGoal(bfd.drvPath, bfd.outputs, buildMode); }, [&](const DerivedPath::Opaque & bo) -> GoalPtr { return makePathSubstitutionGoal(bo.path, buildMode == bmRepair ? Repair : NoRepair); @@ -132,24 +148,46 @@ GoalPtr Worker::makeGoal(const DerivedPath & req, BuildMode buildMode) } +template +static void cullMap(std::map & goalMap, F f) +{ + for (auto i = goalMap.begin(); i != goalMap.end();) + if (!f(i->second)) + i = goalMap.erase(i); + else ++i; +} + + template static void removeGoal(std::shared_ptr goal, std::map> & goalMap) { /* !!! inefficient */ - for (auto i = goalMap.begin(); - i != goalMap.end(); ) - if (i->second.lock() == goal) { - auto j = i; ++j; - goalMap.erase(i); - i = j; - } - else ++i; + cullMap(goalMap, [&](const std::weak_ptr & gp) -> bool { + return gp.lock() != goal; + }); +} + +template +static void removeGoal(std::shared_ptr goal, std::map>::ChildNode> & goalMap); + +template +static void removeGoal(std::shared_ptr goal, std::map>::ChildNode> & goalMap) +{ + /* !!! 
inefficient */ + cullMap(goalMap, [&](DerivedPathMap>::ChildNode & node) -> bool { + if (node.value.lock() == goal) + node.value.reset(); + removeGoal(goal, node.childMap); + return !node.value.expired() || !node.childMap.empty(); + }); } void Worker::removeGoal(GoalPtr goal) { - if (auto drvGoal = std::dynamic_pointer_cast(goal)) + if (auto drvGoal = std::dynamic_pointer_cast(goal)) + nix::removeGoal(drvGoal, outerDerivationGoals.map); + else if (auto drvGoal = std::dynamic_pointer_cast(goal)) nix::removeGoal(drvGoal, derivationGoals); else if (auto subGoal = std::dynamic_pointer_cast(goal)) @@ -215,6 +253,9 @@ void Worker::childStarted(GoalPtr goal, const std::set 0); nrLocalBuilds--; break; + case JobCategory::Administration: + /* Intentionally not limited, see docs */ + break; default: unreachable(); } @@ -290,9 +334,9 @@ void Worker::run(const Goals & _topGoals) for (auto & i : _topGoals) { topGoals.insert(i); - if (auto goal = dynamic_cast(i.get())) { + if (auto goal = dynamic_cast(i.get())) { topPaths.push_back(DerivedPath::Built { - .drvPath = makeConstantStorePathRef(goal->drvPath), + .drvPath = goal->drvReq, .outputs = goal->wantedOutputs, }); } else @@ -552,4 +596,22 @@ GoalPtr upcast_goal(std::shared_ptr subGoal) return subGoal; } +GoalPtr upcast_goal(std::shared_ptr subGoal) +{ + return subGoal; +} + +std::optional, std::reference_wrapper>> tryGetConcreteDrvGoal(GoalPtr waitee) +{ + auto * odg = dynamic_cast(&*waitee); + if (!odg) return std::nullopt; + /* If we failed to obtain the concrete drv, we won't have created + the concrete derivation goal. */ + if (!odg->concreteDrvGoal) return std::nullopt; + return {{ + std::cref(*odg->concreteDrvGoal), + std::cref(*odg->drvReq), + }}; +} + } diff --git a/src/libstore/build/worker.hh b/src/libstore/build/worker.hh index f5e617208..efd518f99 100644 --- a/src/libstore/build/worker.hh +++ b/src/libstore/build/worker.hh @@ -3,6 +3,7 @@ #include "types.hh" #include "store-api.hh" +#include "derived-path-map.hh" #include "goal.hh" #include "realisation.hh" #include "muxable-pipe.hh" @@ -13,6 +14,7 @@ namespace nix { /* Forward definition. */ +struct DerivationCreationAndRealisationGoal; struct DerivationGoal; struct PathSubstitutionGoal; class DrvOutputSubstitutionGoal; @@ -31,9 +33,25 @@ class DrvOutputSubstitutionGoal; */ GoalPtr upcast_goal(std::shared_ptr subGoal); GoalPtr upcast_goal(std::shared_ptr subGoal); +GoalPtr upcast_goal(std::shared_ptr subGoal); typedef std::chrono::time_point steady_time_point; +/** + * The current implementation of impure derivations has + * `DerivationGoal`s accumulate realisations from their waitees. + * Unfortunately, `DerivationGoal`s don't directly depend on other + * goals, but instead depend on `DerivationCreationAndRealisationGoal`s. + * + * We try not to share any of the details of any goal type with any + * other, for sake of modularity and quicker rebuilds. This means we + * cannot "just" downcast and fish out the field. So as an escape hatch, + * we have made the function, written in `worker.cc` where all the goal + * types are visible, and use it instead. + */ + +std::optional, std::reference_wrapper>> tryGetConcreteDrvGoal(GoalPtr waitee); + /** * A mapping used to remember for each child process to what goal it * belongs, and comm channels for receiving log data and output @@ -103,6 +121,9 @@ private: * Maps used to prevent multiple instantiations of a goal for the * same derivation / path. 
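With `makeGoal` now routing every `DerivedPath::Built` through `makeDerivationCreationAndRealisationGoal`, a build request whose derivation is itself a build output no longer fails with `UnimplementedError`. A hedged example of such a request (dynamic derivations remain experimental; `store` and the producer path are placeholders):

```cpp
// Hedged example: requesting outputs of a derivation that is itself produced
// by another derivation. Requires the "dynamic-derivations" experimental
// feature; the producer path below is a placeholder.
auto producer = makeConstantStorePathRef(
    store->parseStorePath("/nix/store/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-producer.drv"));

DerivedPath::Built req{
    .drvPath = make_ref<SingleDerivedPath>(SingleDerivedPath::Built{
        .drvPath = producer,
        .output = "out",               // this output is expected to be a .drv file
    }),
    .outputs = OutputsSpec::All{},
};

store->buildPaths({req});              // handled by the new outer goal
```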
*/ + + DerivedPathMap> outerDerivationGoals; + std::map> derivationGoals; std::map> substitutionGoals; std::map> drvOutputSubstitutionGoals; @@ -196,6 +217,9 @@ public: * @ref DerivationGoal "derivation goal" */ private: + std::shared_ptr makeDerivationCreationAndRealisationGoal( + ref drvPath, + const OutputsSpec & wantedOutputs, BuildMode buildMode = bmNormal); std::shared_ptr makeDerivationGoalCommon( const StorePath & drvPath, const OutputsSpec & wantedOutputs, std::function()> mkDrvGoal); diff --git a/src/libstore/daemon.cc b/src/libstore/daemon.cc index d6745f516..60cb64b7b 100644 --- a/src/libstore/daemon.cc +++ b/src/libstore/daemon.cc @@ -1041,11 +1041,15 @@ void processConnection( conn.protoVersion = protoVersion; conn.features = features; - auto tunnelLogger = new TunnelLogger(conn.to, protoVersion); - auto prevLogger = nix::logger; + auto tunnelLogger_ = std::make_unique(conn.to, protoVersion); + auto tunnelLogger = tunnelLogger_.get(); + std::unique_ptr prevLogger_; + auto prevLogger = logger.get(); // FIXME - if (!recursive) - logger = tunnelLogger; + if (!recursive) { + prevLogger_ = std::move(logger); + logger = std::move(tunnelLogger_); + } unsigned int opCount = 0; diff --git a/src/libstore/derivation-options.cc b/src/libstore/derivation-options.cc new file mode 100644 index 000000000..1fc1718f7 --- /dev/null +++ b/src/libstore/derivation-options.cc @@ -0,0 +1,274 @@ +#include "derivation-options.hh" +#include "json-utils.hh" +#include "parsed-derivations.hh" +#include "types.hh" +#include "util.hh" +#include +#include +#include + +namespace nix { + +using OutputChecks = DerivationOptions::OutputChecks; + +using OutputChecksVariant = std::variant>; + +DerivationOptions DerivationOptions::fromParsedDerivation(const ParsedDerivation & parsed, bool shouldWarn) +{ + DerivationOptions defaults = {}; + + auto structuredAttrs = parsed.structuredAttrs.get(); + + if (shouldWarn && structuredAttrs) { + if (get(*structuredAttrs, "allowedReferences")) { + warn( + "'structuredAttrs' disables the effect of the top-level attribute 'allowedReferences'; use 'outputChecks' instead"); + } + if (get(*structuredAttrs, "allowedRequisites")) { + warn( + "'structuredAttrs' disables the effect of the top-level attribute 'allowedRequisites'; use 'outputChecks' instead"); + } + if (get(*structuredAttrs, "disallowedRequisites")) { + warn( + "'structuredAttrs' disables the effect of the top-level attribute 'disallowedRequisites'; use 'outputChecks' instead"); + } + if (get(*structuredAttrs, "disallowedReferences")) { + warn( + "'structuredAttrs' disables the effect of the top-level attribute 'disallowedReferences'; use 'outputChecks' instead"); + } + if (get(*structuredAttrs, "maxSize")) { + warn( + "'structuredAttrs' disables the effect of the top-level attribute 'maxSize'; use 'outputChecks' instead"); + } + if (get(*structuredAttrs, "maxClosureSize")) { + warn( + "'structuredAttrs' disables the effect of the top-level attribute 'maxClosureSize'; use 'outputChecks' instead"); + } + } + + return { + .outputChecks = [&]() -> OutputChecksVariant { + if (auto structuredAttrs = parsed.structuredAttrs.get()) { + std::map res; + if (auto outputChecks = get(*structuredAttrs, "outputChecks")) { + for (auto & [outputName, output] : getObject(*outputChecks)) { + OutputChecks checks; + + if (auto maxSize = get(output, "maxSize")) + checks.maxSize = maxSize->get(); + + if (auto maxClosureSize = get(output, "maxClosureSize")) + checks.maxClosureSize = maxClosureSize->get(); + + auto get_ = [&](const std::string 
& name) -> std::optional { + if (auto i = get(output, name)) { + StringSet res; + for (auto j = i->begin(); j != i->end(); ++j) { + if (!j->is_string()) + throw Error("attribute '%s' must be a list of strings", name); + res.insert(j->get()); + } + checks.disallowedRequisites = res; + return res; + } + return {}; + }; + + checks.allowedReferences = get_("allowedReferences"); + checks.allowedRequisites = get_("allowedRequisites"); + checks.disallowedReferences = get_("disallowedReferences").value_or(StringSet{}); + checks.disallowedRequisites = get_("disallowedRequisites").value_or(StringSet{}); + ; + + res.insert_or_assign(outputName, std::move(checks)); + } + } + return res; + } else { + return OutputChecks{ + // legacy non-structured-attributes case + .ignoreSelfRefs = true, + .allowedReferences = parsed.getStringSetAttr("allowedReferences"), + .disallowedReferences = parsed.getStringSetAttr("disallowedReferences").value_or(StringSet{}), + .allowedRequisites = parsed.getStringSetAttr("allowedRequisites"), + .disallowedRequisites = parsed.getStringSetAttr("disallowedRequisites").value_or(StringSet{}), + }; + } + }(), + .unsafeDiscardReferences = + [&] { + std::map res; + + if (auto structuredAttrs = parsed.structuredAttrs.get()) { + if (auto udr = get(*structuredAttrs, "unsafeDiscardReferences")) { + for (auto & [outputName, output] : getObject(*udr)) { + if (!output.is_boolean()) + throw Error("attribute 'unsafeDiscardReferences.\"%s\"' must be a Boolean", outputName); + res.insert_or_assign(outputName, output.get()); + } + } + } + + return res; + }(), + .passAsFile = + [&] { + StringSet res; + if (auto * passAsFileString = get(parsed.drv.env, "passAsFile")) { + if (parsed.hasStructuredAttrs()) { + if (shouldWarn) { + warn( + "'structuredAttrs' disables the effect of the top-level attribute 'passAsFile'; because all JSON is always passed via file"); + } + } else { + res = tokenizeString(*passAsFileString); + } + } + return res; + }(), + .additionalSandboxProfile = + parsed.getStringAttr("__sandboxProfile").value_or(defaults.additionalSandboxProfile), + .noChroot = parsed.getBoolAttr("__noChroot", defaults.noChroot), + .impureHostDeps = parsed.getStringSetAttr("__impureHostDeps").value_or(defaults.impureHostDeps), + .impureEnvVars = parsed.getStringSetAttr("impureEnvVars").value_or(defaults.impureEnvVars), + .allowLocalNetworking = parsed.getBoolAttr("__darwinAllowLocalNetworking", defaults.allowLocalNetworking), + .requiredSystemFeatures = + parsed.getStringSetAttr("requiredSystemFeatures").value_or(defaults.requiredSystemFeatures), + .preferLocalBuild = parsed.getBoolAttr("preferLocalBuild", defaults.preferLocalBuild), + .allowSubstitutes = parsed.getBoolAttr("allowSubstitutes", defaults.allowSubstitutes), + }; +} + +StringSet DerivationOptions::getRequiredSystemFeatures(const BasicDerivation & drv) const +{ + // FIXME: cache this? 
+ StringSet res; + for (auto & i : requiredSystemFeatures) + res.insert(i); + if (!drv.type().hasKnownOutputPaths()) + res.insert("ca-derivations"); + return res; +} + +bool DerivationOptions::canBuildLocally(Store & localStore, const BasicDerivation & drv) const +{ + if (drv.platform != settings.thisSystem.get() && !settings.extraPlatforms.get().count(drv.platform) + && !drv.isBuiltin()) + return false; + + if (settings.maxBuildJobs.get() == 0 && !drv.isBuiltin()) + return false; + + for (auto & feature : getRequiredSystemFeatures(drv)) + if (!localStore.systemFeatures.get().count(feature)) + return false; + + return true; +} + +bool DerivationOptions::willBuildLocally(Store & localStore, const BasicDerivation & drv) const +{ + return preferLocalBuild && canBuildLocally(localStore, drv); +} + +bool DerivationOptions::substitutesAllowed() const +{ + return settings.alwaysAllowSubstitutes ? true : allowSubstitutes; +} + +bool DerivationOptions::useUidRange(const BasicDerivation & drv) const +{ + return getRequiredSystemFeatures(drv).count("uid-range"); +} + +} + +namespace nlohmann { + +using namespace nix; + +DerivationOptions adl_serializer::from_json(const json & json) +{ + return { + .outputChecks = [&]() -> OutputChecksVariant { + auto outputChecks = getObject(valueAt(json, "outputChecks")); + + auto forAllOutputsOpt = optionalValueAt(outputChecks, "forAllOutputs"); + auto perOutputOpt = optionalValueAt(outputChecks, "perOutput"); + + if (forAllOutputsOpt && !perOutputOpt) { + return static_cast(*forAllOutputsOpt); + } else if (perOutputOpt && !forAllOutputsOpt) { + return static_cast>(*perOutputOpt); + } else { + throw Error("Exactly one of 'perOutput' or 'forAllOutputs' is required"); + } + }(), + + .unsafeDiscardReferences = valueAt(json, "unsafeDiscardReferences"), + .passAsFile = getStringSet(valueAt(json, "passAsFile")), + + .additionalSandboxProfile = getString(valueAt(json, "additionalSandboxProfile")), + .noChroot = getBoolean(valueAt(json, "noChroot")), + .impureHostDeps = getStringSet(valueAt(json, "impureHostDeps")), + .impureEnvVars = getStringSet(valueAt(json, "impureEnvVars")), + .allowLocalNetworking = getBoolean(valueAt(json, "allowLocalNetworking")), + + .requiredSystemFeatures = getStringSet(valueAt(json, "requiredSystemFeatures")), + .preferLocalBuild = getBoolean(valueAt(json, "preferLocalBuild")), + .allowSubstitutes = getBoolean(valueAt(json, "allowSubstitutes")), + }; +} + +void adl_serializer::to_json(json & json, DerivationOptions o) +{ + json["outputChecks"] = std::visit( + overloaded{ + [&](const OutputChecks & checks) { + nlohmann::json outputChecks; + outputChecks["forAllOutputs"] = checks; + return outputChecks; + }, + [&](const std::map & checksPerOutput) { + nlohmann::json outputChecks; + outputChecks["perOutput"] = checksPerOutput; + return outputChecks; + }, + }, + o.outputChecks); + + json["unsafeDiscardReferences"] = o.unsafeDiscardReferences; + json["passAsFile"] = o.passAsFile; + + json["additionalSandboxProfile"] = o.additionalSandboxProfile; + json["noChroot"] = o.noChroot; + json["impureHostDeps"] = o.impureHostDeps; + json["impureEnvVars"] = o.impureEnvVars; + json["allowLocalNetworking"] = o.allowLocalNetworking; + + json["requiredSystemFeatures"] = o.requiredSystemFeatures; + json["preferLocalBuild"] = o.preferLocalBuild; + json["allowSubstitutes"] = o.allowSubstitutes; +} + +DerivationOptions::OutputChecks adl_serializer::from_json(const json & json) +{ + return { + .ignoreSelfRefs = getBoolean(valueAt(json, "ignoreSelfRefs")), + 
.allowedReferences = nullableValueAt(json, "allowedReferences"), + .disallowedReferences = getStringSet(valueAt(json, "disallowedReferences")), + .allowedRequisites = nullableValueAt(json, "allowedRequisites"), + .disallowedRequisites = getStringSet(valueAt(json, "disallowedRequisites")), + }; +} + +void adl_serializer::to_json(json & json, DerivationOptions::OutputChecks c) +{ + json["ignoreSelfRefs"] = c.ignoreSelfRefs; + json["allowedReferences"] = c.allowedReferences; + json["disallowedReferences"] = c.disallowedReferences; + json["allowedRequisites"] = c.allowedRequisites; + json["disallowedRequisites"] = c.disallowedRequisites; +} + +} diff --git a/src/libstore/derivation-options.hh b/src/libstore/derivation-options.hh new file mode 100644 index 000000000..6e4ea5cd9 --- /dev/null +++ b/src/libstore/derivation-options.hh @@ -0,0 +1,185 @@ +#pragma once +///@file + +#include +#include +#include +#include + +#include "types.hh" +#include "json-impls.hh" + +namespace nix { + +class Store; +struct BasicDerivation; +class ParsedDerivation; + +/** + * This represents all the special options on a `Derivation`. + * + * Currently, these options are parsed from the environment variables + * with the aid of `ParsedDerivation`. + * + * The first goal of this data type is to make sure that no other code + * uses `ParsedDerivation` to ad-hoc parse some additional options. That + * ensures this data type is up to date and fully correct. + * + * The second goal of this data type is to allow an alternative to + * hackily parsing the options from the environment variables. The ATerm + * format cannot change, but in alternatives to it (like the JSON + * format), we have the option of instead storing the options + * separately. That would be nice to separate concerns, and not make any + * environment variable names magical. + */ +struct DerivationOptions +{ + struct OutputChecks + { + bool ignoreSelfRefs = false; + std::optional maxSize, maxClosureSize; + + /** + * env: allowedReferences + * + * A value of `nullopt` indicates that the check is skipped. + * This means that all references are allowed. + */ + std::optional allowedReferences; + + /** + * env: disallowedReferences + * + * No needed for `std::optional`, because skipping the check is + * the same as disallowing the references. + */ + StringSet disallowedReferences; + + /** + * env: allowedRequisites + * + * See `allowedReferences` + */ + std::optional allowedRequisites; + + /** + * env: disallowedRequisites + * + * See `disallowedReferences` + */ + StringSet disallowedRequisites; + + bool operator==(const OutputChecks &) const = default; + }; + + /** + * Either one set of checks for all outputs, or separate checks + * per-output. + */ + std::variant> outputChecks = OutputChecks{}; + + /** + * Whether to avoid scanning for references for a given output. + */ + std::map unsafeDiscardReferences; + + /** + * In non-structured mode, all bindings specified in the derivation + * go directly via the environment, except those listed in the + * passAsFile attribute. Those are instead passed as file names + * pointing to temporary files containing the contents. + * + * Note that passAsFile is ignored in structure mode because it's + * not needed (attributes are not passed through the environment, so + * there is no size constraint). + */ + StringSet passAsFile; + + /** + * env: __sandboxProfile + * + * Just for Darwin + */ + std::string additionalSandboxProfile = ""; + + /** + * env: __noChroot + * + * Derivation would like to opt out of the sandbox. 
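The `JSON_IMPL` serializers defined above give `DerivationOptions` a round-trippable JSON form, in line with the header's goal of supporting formats that store these options separately from the environment. A hedged sketch of the round trip, relying on the defaulted `operator==` and on the `adl_serializer` specializations declared by `JSON_IMPL`:

```cpp
// Hedged sketch: round-tripping DerivationOptions through nlohmann::json.
DerivationOptions opts;
opts.requiredSystemFeatures = {"kvm"};

nlohmann::json j = opts;                     // {"outputChecks": {"forAllOutputs": ...}, ...}
auto opts2 = j.get<DerivationOptions>();

assert(opts == opts2);                       // operator== is defaulted in the header
```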
+ * + * Builder is free to not respect this wish (because it is + * insecure) and fail the build instead. + */ + bool noChroot = false; + + /** + * env: __impureHostDeps + */ + StringSet impureHostDeps = {}; + + /** + * env: impureEnvVars + */ + StringSet impureEnvVars = {}; + + /** + * env: __darwinAllowLocalNetworking + * + * Just for Darwin + */ + bool allowLocalNetworking = false; + + /** + * env: requiredSystemFeatures + */ + StringSet requiredSystemFeatures = {}; + + /** + * env: preferLocalBuild + */ + bool preferLocalBuild = false; + + /** + * env: allowSubstitutes + */ + bool allowSubstitutes = true; + + bool operator==(const DerivationOptions &) const = default; + + /** + * Parse this information from its legacy encoding as part of the + * environment. This should not be used with nice greenfield formats + * (e.g. JSON) but is necessary for supporing old formats (e.g. + * ATerm). + */ + static DerivationOptions fromParsedDerivation(const ParsedDerivation & parsed, bool shouldWarn = true); + + /** + * @param drv Must be the same derivation we parsed this from. In + * the future we'll flip things around so a `BasicDerivation` has + * `DerivationOptions` instead. + */ + StringSet getRequiredSystemFeatures(const BasicDerivation & drv) const; + + /** + * @param drv See note on `getRequiredSystemFeatures` + */ + bool canBuildLocally(Store & localStore, const BasicDerivation & drv) const; + + /** + * @param drv See note on `getRequiredSystemFeatures` + */ + bool willBuildLocally(Store & localStore, const BasicDerivation & drv) const; + + bool substitutesAllowed() const; + + /** + * @param drv See note on `getRequiredSystemFeatures` + */ + bool useUidRange(const BasicDerivation & drv) const; +}; + +}; + +JSON_IMPL(DerivationOptions); +JSON_IMPL(DerivationOptions::OutputChecks) diff --git a/src/libstore/derived-path-map.cc b/src/libstore/derived-path-map.cc index c97d52773..0095a9d78 100644 --- a/src/libstore/derived-path-map.cc +++ b/src/libstore/derived-path-map.cc @@ -52,6 +52,7 @@ typename DerivedPathMap::ChildNode * DerivedPathMap::findSlot(const Single // instantiations +#include "derivation-creation-and-realisation-goal.hh" namespace nix { template<> @@ -68,4 +69,7 @@ std::strong_ordering DerivedPathMap>::ChildNode::operator template struct DerivedPathMap>::ChildNode; template struct DerivedPathMap>; +template struct DerivedPathMap>; + + }; diff --git a/src/libstore/derived-path-map.hh b/src/libstore/derived-path-map.hh index bd60fe887..61e0b5463 100644 --- a/src/libstore/derived-path-map.hh +++ b/src/libstore/derived-path-map.hh @@ -21,8 +21,11 @@ namespace nix { * * @param V A type to instantiate for each output. It should probably * should be an "optional" type so not every interior node has to have a - * value. `* const Something` or `std::optional` would be - * good choices for "optional" types. + * value. For example, the scheduler uses + * `DerivedPathMap>` to + * remember which goals correspond to which outputs. `* const Something` + * or `std::optional` would also be good choices for + * "optional" types. */ template struct DerivedPathMap { diff --git a/src/libstore/filetransfer.cc b/src/libstore/filetransfer.cc index 932e1d756..f2430631d 100644 --- a/src/libstore/filetransfer.cc +++ b/src/libstore/filetransfer.cc @@ -94,7 +94,7 @@ struct curlFileTransfer : public FileTransfer : fileTransfer(fileTransfer) , request(request) , act(*logger, lvlTalkative, actFileTransfer, - fmt(request.data ? "uploading '%s'" : "downloading '%s'", request.uri), + request.post ? 
"" : fmt(request.data ? "uploading '%s'" : "downloading '%s'", request.uri), {request.uri}, request.parentAct) , callback(std::move(callback)) , finalSink([this](std::string_view data) { @@ -271,11 +271,21 @@ struct curlFileTransfer : public FileTransfer return getInterrupted(); } + int silentProgressCallback(double dltotal, double dlnow) + { + return getInterrupted(); + } + static int progressCallbackWrapper(void * userp, double dltotal, double dlnow, double ultotal, double ulnow) { return ((TransferItem *) userp)->progressCallback(dltotal, dlnow); } + static int silentProgressCallbackWrapper(void * userp, double dltotal, double dlnow, double ultotal, double ulnow) + { + return ((TransferItem *) userp)->silentProgressCallback(dltotal, dlnow); + } + static int debugCallback(CURL * handle, curl_infotype type, char * data, size_t size, void * userptr) { if (type == CURLINFO_TEXT) @@ -340,8 +350,11 @@ struct curlFileTransfer : public FileTransfer curl_easy_setopt(req, CURLOPT_HEADERFUNCTION, TransferItem::headerCallbackWrapper); curl_easy_setopt(req, CURLOPT_HEADERDATA, this); - curl_easy_setopt(req, CURLOPT_PROGRESSFUNCTION, progressCallbackWrapper); - curl_easy_setopt(req, CURLOPT_PROGRESSDATA, this); + if (request.post) + curl_easy_setopt(req, CURLOPT_XFERINFOFUNCTION, silentProgressCallbackWrapper); + else + curl_easy_setopt(req, CURLOPT_XFERINFOFUNCTION, progressCallbackWrapper); + curl_easy_setopt(req, CURLOPT_XFERINFODATA, this); curl_easy_setopt(req, CURLOPT_NOPROGRESS, 0); curl_easy_setopt(req, CURLOPT_HTTPHEADER, requestHeaders); @@ -353,7 +366,10 @@ struct curlFileTransfer : public FileTransfer curl_easy_setopt(req, CURLOPT_NOBODY, 1); if (request.data) { - curl_easy_setopt(req, CURLOPT_UPLOAD, 1L); + if (request.post) + curl_easy_setopt(req, CURLOPT_POST, 1L); + else + curl_easy_setopt(req, CURLOPT_UPLOAD, 1L); curl_easy_setopt(req, CURLOPT_READFUNCTION, readCallbackWrapper); curl_easy_setopt(req, CURLOPT_READDATA, this); curl_easy_setopt(req, CURLOPT_INFILESIZE_LARGE, (curl_off_t) request.data->length()); @@ -430,7 +446,8 @@ struct curlFileTransfer : public FileTransfer if (httpStatus == 304 && result.etag == "") result.etag = request.expectedETag; - act.progress(result.bodySize, result.bodySize); + if (!request.post) + act.progress(result.bodySize, result.bodySize); done = true; callback(std::move(result)); } diff --git a/src/libstore/filetransfer.hh b/src/libstore/filetransfer.hh index 43a384d71..0ecc7f376 100644 --- a/src/libstore/filetransfer.hh +++ b/src/libstore/filetransfer.hh @@ -65,6 +65,7 @@ struct FileTransferRequest std::string expectedETag; bool verifyTLS = true; bool head = false; + bool post = false; size_t tries = fileTransferSettings.tries; unsigned int baseRetryTimeMs = 250; ActivityId parentAct; diff --git a/src/libstore/globals.cc b/src/libstore/globals.cc index b64e73c26..d7c000dfa 100644 --- a/src/libstore/globals.cc +++ b/src/libstore/globals.cc @@ -65,7 +65,6 @@ Settings::Settings() , nixStateDir(canonPath(getEnvNonEmpty("NIX_STATE_DIR").value_or(NIX_STATE_DIR))) , nixConfDir(canonPath(getEnvNonEmpty("NIX_CONF_DIR").value_or(NIX_CONF_DIR))) , nixUserConfFiles(getUserConfigFiles()) - , nixManDir(canonPath(NIX_MAN_DIR)) , nixDaemonSocketFile(canonPath(getEnvNonEmpty("NIX_DAEMON_SOCKET_PATH").value_or(nixStateDir + DEFAULT_SOCKET_PATH))) { #ifndef _WIN32 @@ -243,7 +242,7 @@ Path Settings::getDefaultSSLCertFile() return ""; } -const std::string nixVersion = PACKAGE_VERSION; +std::string nixVersion = PACKAGE_VERSION; NLOHMANN_JSON_SERIALIZE_ENUM(SandboxMode, { 
{SandboxMode::smEnabled, true}, diff --git a/src/libstore/globals.hh b/src/libstore/globals.hh index e9a180164..c539ff836 100644 --- a/src/libstore/globals.hh +++ b/src/libstore/globals.hh @@ -84,11 +84,6 @@ public: */ std::vector nixUserConfFiles; - /** - * The directory where the man pages are stored. - */ - Path nixManDir; - /** * File name of the socket the daemon listens to. */ @@ -1064,7 +1059,10 @@ public: 1. `NIX_SSL_CERT_FILE` 2. `SSL_CERT_FILE` - )"}; + )", + {}, + // Don't document the machine-specific default value + false}; #if __linux__ Setting filterSyscalls{ @@ -1253,7 +1251,15 @@ void loadConfFile(AbstractConfig & config); // Used by the Settings constructor std::vector getUserConfigFiles(); -extern const std::string nixVersion; +/** + * The version of Nix itself. + * + * This is not `const`, so that the Nix CLI can provide a more detailed version + * number including the git revision, without having to "re-compile" the entire + * set of Nix libraries to include that version, even when those libraries are + * not affected by the change. + */ +extern std::string nixVersion; /** * @param loadConfig Whether to load configuration from `nix.conf`, `NIX_CONFIG`, etc. May be disabled for unit tests. diff --git a/src/libstore/legacy-ssh-store.cc b/src/libstore/legacy-ssh-store.cc index eac360a4f..480f41059 100644 --- a/src/libstore/legacy-ssh-store.cc +++ b/src/libstore/legacy-ssh-store.cc @@ -69,7 +69,10 @@ ref LegacySSHStore::openConnection() command.push_back("--store"); command.push_back(remoteStore.get()); } - conn->sshConn = master.startCommand(std::move(command)); + conn->sshConn = master.startCommand(std::move(command), std::list{extraSshArgs}); + if (connPipeSize) { + conn->sshConn->trySetBufferSize(*connPipeSize); + } conn->to = FdSink(conn->sshConn->in.get()); conn->from = FdSource(conn->sshConn->out.get()); @@ -100,19 +103,31 @@ std::string LegacySSHStore::getUri() return *uriSchemes().begin() + "://" + host; } +std::map LegacySSHStore::queryPathInfosUncached( + const StorePathSet & paths) +{ + auto conn(connections->get()); + + /* No longer support missing NAR hash */ + assert(GET_PROTOCOL_MINOR(conn->remoteVersion) >= 4); + + debug("querying remote host '%s' for info on '%s'", host, concatStringsSep(", ", printStorePathSet(paths))); + + auto infos = conn->queryPathInfos(*this, paths); + + for (const auto & [_, info] : infos) { + if (info.narHash == Hash::dummy) + throw Error("NAR hash is now mandatory"); + } + + return infos; +} void LegacySSHStore::queryPathInfoUncached(const StorePath & path, Callback> callback) noexcept { try { - auto conn(connections->get()); - - /* No longer support missing NAR hash */ - assert(GET_PROTOCOL_MINOR(conn->remoteVersion) >= 4); - - debug("querying remote host '%s' for info on '%s'", host, printStorePath(path)); - - auto infos = conn->queryPathInfos(*this, {path}); + auto infos = queryPathInfosUncached({path}); switch (infos.size()) { case 0: @@ -120,9 +135,6 @@ void LegacySSHStore::queryPathInfoUncached(const StorePath & path, case 1: { auto & [path2, info] = *infos.begin(); - if (info.narHash == Hash::dummy) - throw Error("NAR hash is now mandatory"); - assert(path == path2); return callback(std::make_shared( std::move(path), @@ -193,13 +205,19 @@ void LegacySSHStore::addToStore(const ValidPathInfo & info, Source & source, void LegacySSHStore::narFromPath(const StorePath & path, Sink & sink) { - auto conn(connections->get()); - conn->narFromPath(*this, path, [&](auto & source) { + narFromPath(path, [&](auto & source) { 
copyNAR(source, sink); }); } +void LegacySSHStore::narFromPath(const StorePath & path, std::function fun) +{ + auto conn(connections->get()); + conn->narFromPath(*this, path, fun); +} + + static ServeProto::BuildOptions buildSettings() { return { @@ -223,6 +241,19 @@ BuildResult LegacySSHStore::buildDerivation(const StorePath & drvPath, const Bas return conn->getBuildDerivationResponse(*this); } +std::function LegacySSHStore::buildDerivationAsync( + const StorePath & drvPath, const BasicDerivation & drv, + const ServeProto::BuildOptions & options) +{ + // Until we have C++23 std::move_only_function + auto conn = std::make_shared::Handle>(connections->get()); + (*conn)->putBuildDerivationRequest(*this, drvPath, drv, options); + + return [this,conn]() -> BuildResult { + return (*conn)->getBuildDerivationResponse(*this); + }; +} + void LegacySSHStore::buildPaths(const std::vector & drvPaths, BuildMode buildMode, std::shared_ptr evalStore) { @@ -294,6 +325,32 @@ StorePathSet LegacySSHStore::queryValidPaths(const StorePathSet & paths, } +StorePathSet LegacySSHStore::queryValidPaths(const StorePathSet & paths, + bool lock, SubstituteFlag maybeSubstitute) +{ + auto conn(connections->get()); + return conn->queryValidPaths(*this, + lock, paths, maybeSubstitute); +} + + +void LegacySSHStore::addMultipleToStoreLegacy(Store & srcStore, const StorePathSet & paths) +{ + auto conn(connections->get()); + conn->to << ServeProto::Command::ImportPaths; + try { + srcStore.exportPaths(paths, conn->to); + } catch (...) { + conn->good = false; + throw; + } + conn->to.flush(); + + if (readInt(conn->from) != 1) + throw Error("remote machine failed to import closure"); +} + + void LegacySSHStore::connect() { auto conn(connections->get()); @@ -307,6 +364,28 @@ unsigned int LegacySSHStore::getProtocol() } +pid_t LegacySSHStore::getConnectionPid() +{ + auto conn(connections->get()); +#ifndef _WIN32 + return conn->sshConn->sshPid; +#else + // TODO: Implement + return 0; +#endif +} + + +LegacySSHStore::ConnectionStats LegacySSHStore::getConnectionStats() +{ + auto conn(connections->get()); + return { + .bytesReceived = conn->from.read, + .bytesSent = conn->to.written, + }; +} + + /** * The legacy ssh protocol doesn't support checking for trusted-user. * Try using ssh-ng:// instead if you want to know. 
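The `buildDerivationAsync` addition above splits the serve-protocol build into a request half and a deferred response half; because `std::function` only accepts copyable callables, the pooled connection is parked in a `std::shared_ptr` that the returned closure keeps alive. Below is a minimal standalone sketch of that pattern — the `FakeConnection`/`FakeHandle` types and `submit` function are made up for illustration, not Nix's actual pool types:

```cpp
// Minimal, self-contained sketch (not Nix code): a move-only resource handle
// must outlive the request/response split, but std::function requires
// copyable callables, so the handle is parked in a std::shared_ptr captured
// by the returned closure.
#include <functional>
#include <iostream>
#include <memory>
#include <string>
#include <utility>

struct FakeConnection {
    std::string pendingRequest;
};

// Stands in for a pool handle: move-only ownership of a connection.
struct FakeHandle {
    std::unique_ptr<FakeConnection> conn = std::make_unique<FakeConnection>();
    FakeHandle() = default;
    FakeHandle(FakeHandle &&) = default;
    FakeHandle & operator=(FakeHandle &&) = default;
    FakeConnection * operator->() { return conn.get(); }
};

// Send the request now, return a thunk that reads the response later.
std::function<std::string()> submit(std::string drv)
{
    auto handle = std::make_shared<FakeHandle>();   // shared_ptr makes the capture copyable
    (*handle)->pendingRequest = std::move(drv);     // analogous to putBuildDerivationRequest
    return [handle]() -> std::string {              // analogous to getBuildDerivationResponse
        return "built: " + (*handle)->pendingRequest;
    };
}

int main()
{
    auto getResult = submit("example.drv");
    // ... other work can happen while the remote side builds ...
    std::cout << getResult() << std::endl;          // must be called exactly once
}
```

Once C++23 `std::move_only_function` is available, the closure could own the handle directly and the `shared_ptr` indirection could be dropped, as the `@todo` in the header notes.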
diff --git a/src/libstore/legacy-ssh-store.hh b/src/libstore/legacy-ssh-store.hh index b541455b4..92aa4ae56 100644 --- a/src/libstore/legacy-ssh-store.hh +++ b/src/libstore/legacy-ssh-store.hh @@ -6,6 +6,7 @@ #include "ssh.hh" #include "callback.hh" #include "pool.hh" +#include "serve-protocol.hh" namespace nix { @@ -24,6 +25,16 @@ struct LegacySSHStoreConfig : virtual CommonSSHStoreConfig const Setting maxConnections{this, 1, "max-connections", "Maximum number of concurrent SSH connections."}; + /** + * Hack for hydra + */ + Strings extraSshArgs = {}; + + /** + * Exposed for hydra + */ + std::optional connPipeSize; + const std::string name() override { return "SSH Store"; } static std::set uriSchemes() { return {"ssh"}; } @@ -60,11 +71,24 @@ struct LegacySSHStore : public virtual LegacySSHStoreConfig, public virtual Stor void queryPathInfoUncached(const StorePath & path, Callback> callback) noexcept override; + std::map queryPathInfosUncached( + const StorePathSet & paths); + void addToStore(const ValidPathInfo & info, Source & source, RepairFlag repair, CheckSigsFlag checkSigs) override; void narFromPath(const StorePath & path, Sink & sink) override; + /** + * Hands over the connection temporarily as source to the given + * function. The function must not consume beyond the NAR; it can + * not just blindly try to always read more bytes until it is + * cut-off. + * + * This is exposed for sake of Hydra. + */ + void narFromPath(const StorePath & path, std::function fun); + std::optional queryPathFromHashPart(const std::string & hashPart) override { unsupported("queryPathFromHashPart"); } @@ -93,6 +117,16 @@ public: BuildResult buildDerivation(const StorePath & drvPath, const BasicDerivation & drv, BuildMode buildMode) override; + /** + * Note, the returned function must only be called once, or we'll + * try to read from the connection twice. + * + * @todo Use C++23 `std::move_only_function`. + */ + std::function buildDerivationAsync( + const StorePath & drvPath, const BasicDerivation & drv, + const ServeProto::BuildOptions & options); + void buildPaths(const std::vector & drvPaths, BuildMode buildMode, std::shared_ptr evalStore) override; void ensurePath(const StorePath & path) override @@ -119,10 +153,36 @@ public: StorePathSet queryValidPaths(const StorePathSet & paths, SubstituteFlag maybeSubstitute = NoSubstitute) override; + /** + * Custom variation that atomically creates temp locks on the remote + * side. + * + * This exists to prevent a race where the remote host + * garbage-collects paths that are already there. Optionally, ask + * the remote host to substitute missing paths. + */ + StorePathSet queryValidPaths(const StorePathSet & paths, + bool lock, + SubstituteFlag maybeSubstitute = NoSubstitute); + + /** + * Just exists because this is exactly what Hydra was doing, and we + * don't yet want an algorithmic change. + */ + void addMultipleToStoreLegacy(Store & srcStore, const StorePathSet & paths); + void connect() override; unsigned int getProtocol() override; + struct ConnectionStats { + size_t bytesReceived, bytesSent; + }; + + ConnectionStats getConnectionStats(); + + pid_t getConnectionPid(); + /** * The legacy ssh protocol doesn't support checking for trusted-user. * Try using ssh-ng:// instead if you want to know. 
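Taken together, the members exposed above form the surface Hydra needs for remote builds over the legacy serve protocol. The following sketch shows one plausible call order; the store objects and build options are passed in already constructed, error handling is omitted, and the orchestration is illustrative rather than Hydra's actual code:

```cpp
// Hypothetical caller of the Hydra-facing LegacySSHStore API declared above.
#include "legacy-ssh-store.hh"
#include "serve-protocol.hh"

#include <iostream>

using namespace nix;

BuildResult buildOnRemote(
    LegacySSHStore & remote,
    Store & localStore,
    const StorePath & drvPath,
    const BasicDerivation & drv,
    const StorePathSet & inputPaths,
    const ServeProto::BuildOptions & options)
{
    /* Atomically lock the inputs on the remote side so they cannot be
       garbage-collected between the copy and the build. */
    auto present = remote.queryValidPaths(inputPaths, /*lock=*/true, NoSubstitute);

    /* Ship whatever is missing over the legacy export/import path. */
    StorePathSet missing;
    for (auto & p : inputPaths)
        if (!present.count(p))
            missing.insert(p);
    if (!missing.empty())
        remote.addMultipleToStoreLegacy(localStore, missing);

    /* Send the build request now; read the response later. The returned
       thunk must be called exactly once. */
    auto getResult = remote.buildDerivationAsync(drvPath, drv, options);
    auto result = getResult();

    auto stats = remote.getConnectionStats();
    std::cerr << "sent " << stats.bytesSent << " bytes, received "
              << stats.bytesReceived << " bytes\n";

    return result;
}
```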
diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc index 9fa68303f..67d5a1dcb 100644 --- a/src/libstore/local-store.cc +++ b/src/libstore/local-store.cc @@ -136,7 +136,12 @@ LocalStore::LocalStore( for (auto & perUserDir : {profilesDir + "/per-user", gcRootsDir + "/per-user"}) { createDirs(perUserDir); if (!readOnly) { - if (chmod(perUserDir.c_str(), 0755) == -1) + auto st = lstat(perUserDir); + + // Skip chmod call if the directory already has the correct permissions (0755). + // This is to avoid failing when the executing user lacks permissions to change the directory's permissions + // even if it would be no-op. + if ((st.st_mode & (S_IRWXU | S_IRWXG | S_IRWXO)) != 0755 && chmod(perUserDir.c_str(), 0755) == -1) throw SysError("could not set permissions on '%s' to 755", perUserDir); } } diff --git a/src/libstore/meson.build b/src/libstore/meson.build index 79d912497..899ba33fe 100644 --- a/src/libstore/meson.build +++ b/src/libstore/meson.build @@ -183,6 +183,7 @@ sources = files( 'binary-cache-store.cc', 'build-result.cc', 'build/derivation-goal.cc', + 'build/derivation-creation-and-realisation-goal.cc', 'build/drv-output-substitution-goal.cc', 'build/entry-points.cc', 'build/goal.cc', @@ -196,6 +197,7 @@ sources = files( 'content-address.cc', 'daemon.cc', 'derivations.cc', + 'derivation-options.cc', 'derived-path-map.cc', 'derived-path.cc', 'downstream-placeholder.cc', @@ -255,6 +257,7 @@ headers = [config_h] + files( 'binary-cache-store.hh', 'build-result.hh', 'build/derivation-goal.hh', + 'build/derivation-creation-and-realisation-goal.hh', 'build/drv-output-substitution-goal.hh', 'build/goal.hh', 'build/substitution-goal.hh', @@ -267,6 +270,7 @@ headers = [config_h] + files( 'content-address.hh', 'daemon.hh', 'derivations.hh', + 'derivation-options.hh', 'derived-path-map.hh', 'derived-path.hh', 'downstream-placeholder.hh', diff --git a/src/libstore/misc.cc b/src/libstore/misc.cc index bcc02206b..9d3b24326 100644 --- a/src/libstore/misc.cc +++ b/src/libstore/misc.cc @@ -2,6 +2,7 @@ #include "derivations.hh" #include "parsed-derivations.hh" +#include "derivation-options.hh" #include "globals.hh" #include "store-api.hh" #include "thread-pool.hh" @@ -222,8 +223,9 @@ void Store::queryMissing(const std::vector & targets, auto drv = make_ref(derivationFromPath(drvPath)); ParsedDerivation parsedDrv(StorePath(drvPath), *drv); + DerivationOptions drvOptions = DerivationOptions::fromParsedDerivation(parsedDrv); - if (!knownOutputPaths && settings.useSubstitutes && parsedDrv.substitutesAllowed()) { + if (!knownOutputPaths && settings.useSubstitutes && drvOptions.substitutesAllowed()) { experimentalFeatureSettings.require(Xp::CaDerivations); // If there are unknown output paths, attempt to find if the @@ -253,7 +255,7 @@ void Store::queryMissing(const std::vector & targets, } } - if (knownOutputPaths && settings.useSubstitutes && parsedDrv.substitutesAllowed()) { + if (knownOutputPaths && settings.useSubstitutes && drvOptions.substitutesAllowed()) { auto drvState = make_ref>(DrvState(invalid.size())); for (auto & output : invalid) pool.enqueue(std::bind(checkOutput, drvPath, drv, output, drvState)); diff --git a/src/libstore/package.nix b/src/libstore/package.nix index c982b44f0..31867d331 100644 --- a/src/libstore/package.nix +++ b/src/libstore/package.nix @@ -69,14 +69,6 @@ mkMesonLibrary (finalAttrs: { nlohmann_json ]; - preConfigure = - # "Inline" .version so it's not a symlink, and includes the suffix. - # Do the meson utils, without modification. 
- '' - chmod u+w ./.version - echo ${version} > ../../.version - ''; - mesonFlags = [ (lib.mesonEnable "seccomp-sandboxing" stdenv.hostPlatform.isLinux) diff --git a/src/libstore/parsed-derivations.cc b/src/libstore/parsed-derivations.cc index d8459d4d7..b26c36efe 100644 --- a/src/libstore/parsed-derivations.cc +++ b/src/libstore/parsed-derivations.cc @@ -87,47 +87,12 @@ std::optional ParsedDerivation::getStringsAttr(const std::string & name } } -StringSet ParsedDerivation::getRequiredSystemFeatures() const +std::optional ParsedDerivation::getStringSetAttr(const std::string & name) const { - // FIXME: cache this? - StringSet res; - for (auto & i : getStringsAttr("requiredSystemFeatures").value_or(Strings())) - res.insert(i); - if (!drv.type().hasKnownOutputPaths()) - res.insert("ca-derivations"); - return res; -} - -bool ParsedDerivation::canBuildLocally(Store & localStore) const -{ - if (drv.platform != settings.thisSystem.get() - && !settings.extraPlatforms.get().count(drv.platform) - && !drv.isBuiltin()) - return false; - - if (settings.maxBuildJobs.get() == 0 - && !drv.isBuiltin()) - return false; - - for (auto & feature : getRequiredSystemFeatures()) - if (!localStore.systemFeatures.get().count(feature)) return false; - - return true; -} - -bool ParsedDerivation::willBuildLocally(Store & localStore) const -{ - return getBoolAttr("preferLocalBuild") && canBuildLocally(localStore); -} - -bool ParsedDerivation::substitutesAllowed() const -{ - return settings.alwaysAllowSubstitutes ? true : getBoolAttr("allowSubstitutes", true); -} - -bool ParsedDerivation::useUidRange() const -{ - return getRequiredSystemFeatures().count("uid-range"); + auto ss = getStringsAttr(name); + return ss + ? (std::optional{StringSet{ss->begin(), ss->end()}}) + : (std::optional{}); } static std::regex shVarName("[A-Za-z_][A-Za-z0-9_]*"); @@ -188,7 +153,6 @@ static nlohmann::json pathInfoToJSON( std::optional ParsedDerivation::prepareStructuredAttrs(Store & store, const StorePathSet & inputPaths) { - auto structuredAttrs = getStructuredAttrs(); if (!structuredAttrs) return std::nullopt; auto json = *structuredAttrs; diff --git a/src/libstore/parsed-derivations.hh b/src/libstore/parsed-derivations.hh index 71085a604..51992fa84 100644 --- a/src/libstore/parsed-derivations.hh +++ b/src/libstore/parsed-derivations.hh @@ -8,38 +8,40 @@ namespace nix { +struct DerivationOptions; + class ParsedDerivation { StorePath drvPath; BasicDerivation & drv; std::unique_ptr structuredAttrs; -public: - - ParsedDerivation(const StorePath & drvPath, BasicDerivation & drv); - - ~ParsedDerivation(); - - const nlohmann::json * getStructuredAttrs() const - { - return structuredAttrs.get(); - } - std::optional getStringAttr(const std::string & name) const; bool getBoolAttr(const std::string & name, bool def = false) const; std::optional getStringsAttr(const std::string & name) const; - StringSet getRequiredSystemFeatures() const; + std::optional getStringSetAttr(const std::string & name) const; - bool canBuildLocally(Store & localStore) const; + /** + * Only `DerivationOptions` is allowed to parse individual fields + * from `ParsedDerivation`. This ensure that it includes all + * derivation options, and, the likes of `LocalDerivationGoal` are + * incapable of more ad-hoc options. 
+ */ + friend struct DerivationOptions; - bool willBuildLocally(Store & localStore) const; +public: - bool substitutesAllowed() const; + ParsedDerivation(const StorePath & drvPath, BasicDerivation & drv); - bool useUidRange() const; + ~ParsedDerivation(); + + bool hasStructuredAttrs() const + { + return static_cast(structuredAttrs); + } std::optional prepareStructuredAttrs(Store & store, const StorePathSet & inputPaths); }; diff --git a/src/libstore/ssh.cc b/src/libstore/ssh.cc index 116a480ba..f47cfbbec 100644 --- a/src/libstore/ssh.cc +++ b/src/libstore/ssh.cc @@ -240,4 +240,19 @@ Path SSHMaster::startMaster() #endif +void SSHMaster::Connection::trySetBufferSize(size_t size) +{ +#ifdef F_SETPIPE_SZ + /* This `fcntl` method of doing this takes a positive `int`. Check + and convert accordingly. + + The function overall still takes `size_t` because this is more + portable for a platform-agnostic interface. */ + assert(size <= INT_MAX); + int pipesize = size; + fcntl(in.get(), F_SETPIPE_SZ, pipesize); + fcntl(out.get(), F_SETPIPE_SZ, pipesize); +#endif +} + } diff --git a/src/libstore/ssh.hh b/src/libstore/ssh.hh index 85be704ec..eb05df011 100644 --- a/src/libstore/ssh.hh +++ b/src/libstore/ssh.hh @@ -54,6 +54,18 @@ public: Pid sshPid; #endif AutoCloseFD out, in; + + /** + * Try to set the buffer size in both directions to the + * designated amount, if possible. If not possible, does + * nothing. + * + * Current implementation is to use `fcntl` with `F_SETPIPE_SZ`, + * which is Linux-only. For this implementation, `size` must + * convertable to an `int`. In other words, it must be within + * `[0, INT_MAX]`. + */ + void trySetBufferSize(size_t size); }; /** diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc index 236622eae..fc3fbcc0f 100644 --- a/src/libstore/store-api.cc +++ b/src/libstore/store-api.cc @@ -230,18 +230,22 @@ void Store::addMultipleToStore( { std::atomic nrDone{0}; std::atomic nrFailed{0}; - std::atomic bytesExpected{0}; std::atomic nrRunning{0}; using PathWithInfo = std::pair>; + uint64_t bytesExpected = 0; + std::map infosMap; StorePathSet storePathsToAdd; for (auto & thingToAdd : pathsToCopy) { + bytesExpected += thingToAdd.first.narSize; infosMap.insert_or_assign(thingToAdd.first.path, &thingToAdd); storePathsToAdd.insert(thingToAdd.first.path); } + act.setExpected(actCopyPath, bytesExpected); + auto showProgress = [&, nrTotal = pathsToCopy.size()]() { act.progress(nrDone, nrTotal, nrRunning, nrFailed); }; @@ -259,9 +263,6 @@ void Store::addMultipleToStore( return StorePathSet(); } - bytesExpected += info.narSize; - act.setExpected(actCopyPath, bytesExpected); - return info.references; }, diff --git a/src/libstore/unix/build/local-derivation-goal.cc b/src/libstore/unix/build/local-derivation-goal.cc index 21eb1506d..61a36dd51 100644 --- a/src/libstore/unix/build/local-derivation-goal.cc +++ b/src/libstore/unix/build/local-derivation-goal.cc @@ -184,10 +184,6 @@ void LocalDerivationGoal::killSandbox(bool getStats) Goal::Co LocalDerivationGoal::tryLocalBuild() { -#if __APPLE__ - additionalSandboxProfile = parsedDrv->getStringAttr("__sandboxProfile").value_or(""); -#endif - unsigned int curBuilds = worker.getNrLocalBuilds(); if (curBuilds >= settings.maxBuildJobs) { worker.waitForBuildSlot(shared_from_this()); @@ -200,13 +196,12 @@ Goal::Co LocalDerivationGoal::tryLocalBuild() /* Are we doing a chroot build? 
*/ { - auto noChroot = parsedDrv->getBoolAttr("__noChroot"); if (settings.sandboxMode == smEnabled) { - if (noChroot) + if (drvOptions->noChroot) throw Error("derivation '%s' has '__noChroot' set, " "but that's not allowed when 'sandbox' is 'true'", worker.store.printStorePath(drvPath)); #if __APPLE__ - if (additionalSandboxProfile != "") + if (drvOptions->additionalSandboxProfile != "") throw Error("derivation '%s' specifies a sandbox profile, " "but this is only allowed when 'sandbox' is 'relaxed'", worker.store.printStorePath(drvPath)); #endif @@ -215,7 +210,7 @@ Goal::Co LocalDerivationGoal::tryLocalBuild() else if (settings.sandboxMode == smDisabled) useChroot = false; else if (settings.sandboxMode == smRelaxed) - useChroot = derivationType->isSandboxed() && !noChroot; + useChroot = derivationType->isSandboxed() && !drvOptions->noChroot; } auto & localStore = getLocalStore(); @@ -240,7 +235,7 @@ Goal::Co LocalDerivationGoal::tryLocalBuild() if (useBuildUsers()) { if (!buildUser) - buildUser = acquireUserLock(parsedDrv->useUidRange() ? 65536 : 1, useChroot); + buildUser = acquireUserLock(drvOptions->useUidRange(*drv) ? 65536 : 1, useChroot); if (!buildUser) { if (!actLock) @@ -531,13 +526,19 @@ void LocalDerivationGoal::startBuilder() killSandbox(false); /* Right platform? */ - if (!parsedDrv->canBuildLocally(worker.store)) - throw Error("a '%s' with features {%s} is required to build '%s', but I am a '%s' with features {%s}", - drv->platform, - concatStringsSep(", ", parsedDrv->getRequiredSystemFeatures()), - worker.store.printStorePath(drvPath), - settings.thisSystem, - concatStringsSep(", ", worker.store.systemFeatures)); + if (!drvOptions->canBuildLocally(worker.store, *drv)) { + // since aarch64-darwin has Rosetta 2, this user can actually run x86_64-darwin on their hardware - we should tell them to run the command to install Darwin 2 + if (drv->platform == "x86_64-darwin" && settings.thisSystem == "aarch64-darwin") { + throw Error("run `/usr/sbin/softwareupdate --install-rosetta` to enable your %s to run programs for %s", settings.thisSystem, drv->platform); + } else { + throw Error("a '%s' with features {%s} is required to build '%s', but I am a '%s' with features {%s}", + drv->platform, + concatStringsSep(", ", drvOptions->getRequiredSystemFeatures(*drv)), + worker.store.printStorePath(drvPath), + settings.thisSystem, + concatStringsSep(", ", worker.store.systemFeatures)); + } + } /* Create a temporary directory where the build will take place. */ @@ -622,7 +623,7 @@ void LocalDerivationGoal::startBuilder() writeStructuredAttrs(); /* Handle exportReferencesGraph(), if set. */ - if (!parsedDrv->getStructuredAttrs()) { + if (!parsedDrv->hasStructuredAttrs()) { /* The `exportReferencesGraph' feature allows the references graph to be passed to a builder. This attribute should be a list of pairs [name1 path1 name2 path2 ...]. The references graph of @@ -696,7 +697,7 @@ void LocalDerivationGoal::startBuilder() PathSet allowedPaths = settings.allowedImpureHostPrefixes; /* This works like the above, except on a per-derivation level */ - auto impurePaths = parsedDrv->getStringsAttr("__impureHostDeps").value_or(Strings()); + auto impurePaths = drvOptions->impureHostDeps; for (auto & i : impurePaths) { bool found = false; @@ -716,7 +717,7 @@ void LocalDerivationGoal::startBuilder() throw Error("derivation '%s' requested impure path '%s', but it was not in allowed-impure-host-deps", worker.store.printStorePath(drvPath), i); - /* Allow files in __impureHostDeps to be missing; e.g. 
+ /* Allow files in drvOptions->impureHostDeps to be missing; e.g. macOS 11+ has no /usr/lib/libSystem*.dylib */ pathsInChroot[i] = {i, true}; } @@ -756,10 +757,10 @@ void LocalDerivationGoal::startBuilder() nobody account. The latter is kind of a hack to support Samba-in-QEMU. */ createDirs(chrootRootDir + "/etc"); - if (parsedDrv->useUidRange()) + if (drvOptions->useUidRange(*drv)) chownToBuilder(chrootRootDir + "/etc"); - if (parsedDrv->useUidRange() && (!buildUser || buildUser->getUIDCount() < 65536)) + if (drvOptions->useUidRange(*drv) && (!buildUser || buildUser->getUIDCount() < 65536)) throw Error("feature 'uid-range' requires the setting '%s' to be enabled", settings.autoAllocateUids.name); /* Declare the build user's group so that programs get a consistent @@ -818,7 +819,7 @@ void LocalDerivationGoal::startBuilder() } #else - if (parsedDrv->useUidRange()) + if (drvOptions->useUidRange(*drv)) throw Error("feature 'uid-range' is not supported on this platform"); #if __APPLE__ /* We don't really have any parent prep work to do (yet?) @@ -828,7 +829,7 @@ void LocalDerivationGoal::startBuilder() #endif #endif } else { - if (parsedDrv->useUidRange()) + if (drvOptions->useUidRange(*drv)) throw Error("feature 'uid-range' is only supported in sandboxed builds"); } @@ -873,7 +874,7 @@ void LocalDerivationGoal::startBuilder() /* Fire up a Nix daemon to process recursive Nix calls from the builder. */ - if (parsedDrv->getRequiredSystemFeatures().count("recursive-nix")) + if (drvOptions->getRequiredSystemFeatures(*drv).count("recursive-nix")) startDaemon(); /* Run the builder. */ @@ -1141,18 +1142,12 @@ void LocalDerivationGoal::initTmpDir() tmpDirInSandbox = tmpDir; #endif - /* In non-structured mode, add all bindings specified in the - derivation via the environment, except those listed in the - passAsFile attribute. Those are passed as file names pointing - to temporary files containing the contents. Note that - passAsFile is ignored in structure mode because it's not - needed (attributes are not passed through the environment, so - there is no size constraint). */ - if (!parsedDrv->getStructuredAttrs()) { - - StringSet passAsFile = tokenizeString(getOr(drv->env, "passAsFile", "")); + /* In non-structured mode, set all bindings either directory in the + environment or via a file, as specified by + `DerivationOptions::passAsFile`. */ + if (!parsedDrv->hasStructuredAttrs()) { for (auto & i : drv->env) { - if (passAsFile.find(i.first) == passAsFile.end()) { + if (drvOptions->passAsFile.find(i.first) == drvOptions->passAsFile.end()) { env[i.first] = i.second; } else { auto hash = hashString(HashAlgorithm::SHA256, i.first); @@ -1229,7 +1224,7 @@ void LocalDerivationGoal::initEnv() if (!impureEnv.empty()) experimentalFeatureSettings.require(Xp::ConfigurableImpureEnv); - for (auto & i : parsedDrv->getStringsAttr("impureEnvVars").value_or(Strings())) { + for (auto & i : drvOptions->impureEnvVars){ auto envVar = impureEnv.find(i); if (envVar != impureEnv.end()) { env[i] = envVar->second; @@ -1989,7 +1984,7 @@ void LocalDerivationGoal::runChild() } /* Make /etc unwritable */ - if (!parsedDrv->useUidRange()) + if (!drvOptions->useUidRange(*drv)) chmod_(chrootRootDir + "/etc", 0555); /* Unshare this mount namespace. 
This is necessary because @@ -2176,7 +2171,7 @@ void LocalDerivationGoal::runChild() } sandboxProfile += ")\n"; - sandboxProfile += additionalSandboxProfile; + sandboxProfile += drvOptions->additionalSandboxProfile; } else sandboxProfile += #include "sandbox-minimal.sb" @@ -2185,8 +2180,6 @@ void LocalDerivationGoal::runChild() debug("Generated sandbox profile:"); debug(sandboxProfile); - bool allowLocalNetworking = parsedDrv->getBoolAttr("__darwinAllowLocalNetworking"); - /* The tmpDir in scope points at the temporary build directory for our derivation. Some packages try different mechanisms to find temporary directories, so we want to open up a broader place for them to put their files, if needed. */ Path globalTmpDir = canonPath(defaultTempDir(), true); @@ -2199,7 +2192,7 @@ void LocalDerivationGoal::runChild() Strings sandboxArgs; sandboxArgs.push_back("_GLOBAL_TMP_DIR"); sandboxArgs.push_back(globalTmpDir); - if (allowLocalNetworking) { + if (drvOptions->allowLocalNetworking) { sandboxArgs.push_back("_ALLOW_LOCAL_NETWORKING"); sandboxArgs.push_back("1"); } @@ -2219,7 +2212,7 @@ void LocalDerivationGoal::runChild() /* Execute the program. This should not return. */ if (drv->isBuiltin()) { try { - logger = makeJSONLogger(*logger); + logger = makeJSONLogger(getStandardError()); std::map outputs; for (auto & e : drv->outputs) @@ -2389,14 +2382,8 @@ SingleDrvOutputs LocalDerivationGoal::registerOutputs() inodesSeen); bool discardReferences = false; - if (auto structuredAttrs = parsedDrv->getStructuredAttrs()) { - if (auto udr = get(*structuredAttrs, "unsafeDiscardReferences")) { - if (auto output = get(*udr, outputName)) { - if (!output->is_boolean()) - throw Error("attribute 'unsafeDiscardReferences.\"%s\"' of derivation '%s' must be a Boolean", outputName, drvPath.to_string()); - discardReferences = output->get(); - } - } + if (auto udr = get(drvOptions->unsafeDiscardReferences, outputName)) { + discardReferences = *udr; } StorePathSet references; @@ -2867,13 +2854,6 @@ void LocalDerivationGoal::checkOutputs(const std::map maxSize, maxClosureSize; - std::optional allowedReferences, allowedRequisites, disallowedReferences, disallowedRequisites; - }; - /* Compute the closure and closure size of some output. This is slightly tricky because some of its references (namely other outputs) may not be valid yet. */ @@ -2905,7 +2885,7 @@ void LocalDerivationGoal::checkOutputs(const std::map *checks.maxSize) throw BuildError("path '%s' is too large at %d bytes; limit is %d bytes", @@ -2918,15 +2898,13 @@ void LocalDerivationGoal::checkOutputs(const std::map & value, bool allowed, bool recursive) + auto checkRefs = [&](const StringSet & value, bool allowed, bool recursive) { - if (!value) return; - /* Parse a list of reference specifiers. Each element must either be a store path, or the symbolic name of the output of the derivation (such as `out'). 
*/ StorePathSet spec; - for (auto & i : *value) { + for (auto & i : value) { if (worker.store.isStorePath(i)) spec.insert(worker.store.parseStorePath(i)); else if (auto output = get(outputs, i)) @@ -2964,73 +2942,35 @@ void LocalDerivationGoal::checkOutputs(const std::mapgetStructuredAttrs()) { - if (get(*structuredAttrs, "allowedReferences")){ - warn("'structuredAttrs' disables the effect of the top-level attribute 'allowedReferences'; use 'outputChecks' instead"); - } - if (get(*structuredAttrs, "allowedRequisites")){ - warn("'structuredAttrs' disables the effect of the top-level attribute 'allowedRequisites'; use 'outputChecks' instead"); - } - if (get(*structuredAttrs, "disallowedRequisites")){ - warn("'structuredAttrs' disables the effect of the top-level attribute 'disallowedRequisites'; use 'outputChecks' instead"); - } - if (get(*structuredAttrs, "disallowedReferences")){ - warn("'structuredAttrs' disables the effect of the top-level attribute 'disallowedReferences'; use 'outputChecks' instead"); - } - if (get(*structuredAttrs, "maxSize")){ - warn("'structuredAttrs' disables the effect of the top-level attribute 'maxSize'; use 'outputChecks' instead"); - } - if (get(*structuredAttrs, "maxClosureSize")){ - warn("'structuredAttrs' disables the effect of the top-level attribute 'maxClosureSize'; use 'outputChecks' instead"); - } - if (auto outputChecks = get(*structuredAttrs, "outputChecks")) { - if (auto output = get(*outputChecks, outputName)) { - Checks checks; + std::visit(overloaded{ + [&](const DerivationOptions::OutputChecks & checks) { + applyChecks(checks); + }, + [&](const std::map & checksPerOutput) { + if (auto outputChecks = get(checksPerOutput, outputName)) - if (auto maxSize = get(*output, "maxSize")) - checks.maxSize = maxSize->get(); - - if (auto maxClosureSize = get(*output, "maxClosureSize")) - checks.maxClosureSize = maxClosureSize->get(); - - auto get_ = [&](const std::string & name) -> std::optional { - if (auto i = get(*output, name)) { - Strings res; - for (auto j = i->begin(); j != i->end(); ++j) { - if (!j->is_string()) - throw Error("attribute '%s' of derivation '%s' must be a list of strings", name, worker.store.printStorePath(drvPath)); - res.push_back(j->get()); - } - checks.disallowedRequisites = res; - return res; - } - return {}; - }; - - checks.allowedReferences = get_("allowedReferences"); - checks.allowedRequisites = get_("allowedRequisites"); - checks.disallowedReferences = get_("disallowedReferences"); - checks.disallowedRequisites = get_("disallowedRequisites"); - - applyChecks(checks); - } - } - } else { - // legacy non-structured-attributes case - Checks checks; - checks.ignoreSelfRefs = true; - checks.allowedReferences = parsedDrv->getStringsAttr("allowedReferences"); - checks.allowedRequisites = parsedDrv->getStringsAttr("allowedRequisites"); - checks.disallowedReferences = parsedDrv->getStringsAttr("disallowedReferences"); - checks.disallowedRequisites = parsedDrv->getStringsAttr("disallowedRequisites"); - applyChecks(checks); - } + applyChecks(*outputChecks); + }, + }, drvOptions->outputChecks); } } diff --git a/src/libstore/unix/build/local-derivation-goal.hh b/src/libstore/unix/build/local-derivation-goal.hh index 917028880..c7a129f90 100644 --- a/src/libstore/unix/build/local-derivation-goal.hh +++ b/src/libstore/unix/build/local-derivation-goal.hh @@ -109,11 +109,6 @@ struct LocalDerivationGoal : public DerivationGoal typedef map Environment; Environment env; -#if __APPLE__ - typedef std::string SandboxProfile; - SandboxProfile 
additionalSandboxProfile; -#endif - /** * Hash rewriting. */ diff --git a/src/libutil-c/package.nix b/src/libutil-c/package.nix index 72f57d6f9..f26f57775 100644 --- a/src/libutil-c/package.nix +++ b/src/libutil-c/package.nix @@ -34,14 +34,6 @@ mkMesonLibrary (finalAttrs: { nix-util ]; - preConfigure = - # "Inline" .version so it's not a symlink, and includes the suffix. - # Do the meson utils, without modification. - '' - chmod u+w ./.version - echo ${version} > ../../.version - ''; - mesonFlags = [ ]; diff --git a/src/libutil-test-support/package.nix b/src/libutil-test-support/package.nix index 33cd5217d..fafd47c86 100644 --- a/src/libutil-test-support/package.nix +++ b/src/libutil-test-support/package.nix @@ -38,14 +38,6 @@ mkMesonLibrary (finalAttrs: { rapidcheck ]; - preConfigure = - # "Inline" .version so it's not a symlink, and includes the suffix. - # Do the meson utils, without modification. - '' - chmod u+w ./.version - echo ${version} > ../../.version - ''; - mesonFlags = [ ]; diff --git a/src/libutil-tests/package.nix b/src/libutil-tests/package.nix index d89c54453..c06de6894 100644 --- a/src/libutil-tests/package.nix +++ b/src/libutil-tests/package.nix @@ -45,14 +45,6 @@ mkMesonExecutable (finalAttrs: { gtest ]; - preConfigure = - # "Inline" .version so it's not a symlink, and includes the suffix. - # Do the meson utils, without modification. - '' - chmod u+w ./.version - echo ${version} > ../../.version - ''; - mesonFlags = [ ]; diff --git a/src/libutil/experimental-features.cc b/src/libutil/experimental-features.cc index dba5893a8..158e202d1 100644 --- a/src/libutil/experimental-features.cc +++ b/src/libutil/experimental-features.cc @@ -125,6 +125,8 @@ constexpr std::array xpFeatureDetails runCommand "foo" { + # Optional: let Nix know "foo" requires the experimental feature + requiredSystemFeatures = [ "recursive-nix" ]; buildInputs = [ nix jq ]; NIX_PATH = "nixpkgs=${}"; } diff --git a/src/libutil/file-descriptor.cc b/src/libutil/file-descriptor.cc index 542c33f3b..707c0f882 100644 --- a/src/libutil/file-descriptor.cc +++ b/src/libutil/file-descriptor.cc @@ -1,6 +1,3 @@ -#include "file-system.hh" -#include "signals.hh" -#include "finally.hh" #include "serialise.hh" #include "util.hh" diff --git a/src/libutil/json-utils.cc b/src/libutil/json-utils.cc index dff068e07..f67811e21 100644 --- a/src/libutil/json-utils.cc +++ b/src/libutil/json-utils.cc @@ -3,6 +3,7 @@ #include "types.hh" #include #include +#include namespace nix { @@ -38,6 +39,15 @@ std::optional optionalValueAt(const nlohmann::json::object_t & m return std::optional { map.at(key) }; } +std::optional nullableValueAt(const nlohmann::json::object_t & map, const std::string & key) +{ + auto value = valueAt(map, key); + + if (value.is_null()) + return std::nullopt; + + return std::optional { std::move(value) }; +} const nlohmann::json * getNullable(const nlohmann::json & value) { diff --git a/src/libutil/json-utils.hh b/src/libutil/json-utils.hh index 546334e1e..1afc5d796 100644 --- a/src/libutil/json-utils.hh +++ b/src/libutil/json-utils.hh @@ -25,6 +25,7 @@ const nlohmann::json & valueAt( const std::string & key); std::optional optionalValueAt(const nlohmann::json::object_t & value, const std::string & key); +std::optional nullableValueAt(const nlohmann::json::object_t & value, const std::string & key); /** * Downcast the json object, failing with a nice error if the conversion fails. 
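`nullableValueAt` complements the existing `optionalValueAt`: the key must be present, but an explicit JSON `null` maps to `std::nullopt` — which is how `DerivationOptions::OutputChecks::allowedReferences` encodes "check skipped". A standalone re-implementation, purely to illustrate the contract (it is not the code above):

```cpp
// Standalone illustration of the two lookup helpers' semantics.
#include <iostream>
#include <optional>
#include <stdexcept>
#include <string>
#include <nlohmann/json.hpp>

using json = nlohmann::json;

std::optional<json> optionalValueAt(const json::object_t & map, const std::string & key)
{
    auto it = map.find(key);
    if (it == map.end())
        return std::nullopt;                      // absent key -> nullopt
    return it->second;
}

std::optional<json> nullableValueAt(const json::object_t & map, const std::string & key)
{
    auto it = map.find(key);
    if (it == map.end())
        throw std::runtime_error("key '" + key + "' is required");
    if (it->second.is_null())
        return std::nullopt;                      // present but null -> nullopt
    return it->second;
}

int main()
{
    auto parsed = json::parse(R"({"allowedReferences": null, "maxSize": 1024})");
    auto checks = parsed.get<json::object_t>();

    std::cout << nullableValueAt(checks, "allowedReferences").has_value() << "\n"; // 0: check skipped
    std::cout << *nullableValueAt(checks, "maxSize") << "\n";                      // 1024
    std::cout << optionalValueAt(checks, "missing").has_value() << "\n";           // 0: key absent
    // nullableValueAt(checks, "missing") would throw: the key is mandatory.
}
```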
@@ -69,6 +70,9 @@ struct json_avoids_null> : std::true_type {}; template struct json_avoids_null> : std::true_type {}; +template +struct json_avoids_null> : std::true_type {}; + template struct json_avoids_null> : std::true_type {}; diff --git a/src/libutil/logging.cc b/src/libutil/logging.cc index a5add5565..deeae120a 100644 --- a/src/libutil/logging.cc +++ b/src/libutil/logging.cc @@ -29,7 +29,7 @@ void setCurActivity(const ActivityId activityId) curActivity = activityId; } -Logger * logger = makeSimpleLogger(true); +std::unique_ptr logger = makeSimpleLogger(true); void Logger::warn(const std::string & msg) { @@ -128,9 +128,9 @@ void writeToStderr(std::string_view s) } } -Logger * makeSimpleLogger(bool printBuildLogs) +std::unique_ptr makeSimpleLogger(bool printBuildLogs) { - return new SimpleLogger(printBuildLogs); + return std::make_unique(printBuildLogs); } std::atomic nextId{0}; @@ -167,9 +167,9 @@ void to_json(nlohmann::json & json, std::shared_ptr pos) } struct JSONLogger : Logger { - Logger & prevLogger; + Descriptor fd; - JSONLogger(Logger & prevLogger) : prevLogger(prevLogger) { } + JSONLogger(Descriptor fd) : fd(fd) { } bool isVerbose() override { return true; @@ -190,7 +190,7 @@ struct JSONLogger : Logger { void write(const nlohmann::json & json) { - prevLogger.log(lvlError, "@nix " + json.dump(-1, ' ', false, nlohmann::json::error_handler_t::replace)); + writeLine(fd, "@nix " + json.dump(-1, ' ', false, nlohmann::json::error_handler_t::replace)); } void log(Verbosity lvl, std::string_view s) override @@ -262,9 +262,9 @@ struct JSONLogger : Logger { } }; -Logger * makeJSONLogger(Logger & prevLogger) +std::unique_ptr makeJSONLogger(Descriptor fd) { - return new JSONLogger(prevLogger); + return std::make_unique(fd); } static Logger::Fields getFields(nlohmann::json & json) diff --git a/src/libutil/logging.hh b/src/libutil/logging.hh index 11e4033a5..474283894 100644 --- a/src/libutil/logging.hh +++ b/src/libutil/logging.hh @@ -3,6 +3,7 @@ #include "error.hh" #include "config.hh" +#include "file-descriptor.hh" #include @@ -179,11 +180,11 @@ struct PushActivity ~PushActivity() { setCurActivity(prevAct); } }; -extern Logger * logger; +extern std::unique_ptr logger; -Logger * makeSimpleLogger(bool printBuildLogs = true); +std::unique_ptr makeSimpleLogger(bool printBuildLogs = true); -Logger * makeJSONLogger(Logger & prevLogger); +std::unique_ptr makeJSONLogger(Descriptor fd); /** * @param source A noun phrase describing the source of the message, e.g. "the builder". 
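With the global `logger` now being a `std::unique_ptr` and `makeJSONLogger` taking a file descriptor, switching a process to structured logging no longer wraps the previous logger. A small illustrative sketch, assuming the usual Nix headers are on the include path:

```cpp
// Illustrative only: swap the global logger for a JSON logger writing
// "@nix ..." records directly to stderr, as the builtin-builder path above does.
#include "logging.hh"
#include "file-descriptor.hh"

using namespace nix;

void switchToJsonLogging()
{
    // Reassigning the unique_ptr destroys the previous logger; JSON records
    // go straight to the descriptor instead of through another Logger.
    logger = makeJSONLogger(getStandardError());
    logger->log(lvlInfo, "now emitting @nix JSON log lines");
}
```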
diff --git a/src/libutil/meson.build b/src/libutil/meson.build index 9ee3770de..df459f0e5 100644 --- a/src/libutil/meson.build +++ b/src/libutil/meson.build @@ -153,6 +153,7 @@ sources = files( 'json-utils.cc', 'logging.cc', 'memory-source-accessor.cc', + 'mounted-source-accessor.cc', 'position.cc', 'posix-source-accessor.cc', 'references.cc', @@ -166,6 +167,7 @@ sources = files( 'tarfile.cc', 'terminal.cc', 'thread-pool.cc', + 'union-source-accessor.cc', 'unix-domain-socket.cc', 'url.cc', 'users.cc', diff --git a/src/libfetchers/mounted-source-accessor.cc b/src/libutil/mounted-source-accessor.cc similarity index 92% rename from src/libfetchers/mounted-source-accessor.cc rename to src/libutil/mounted-source-accessor.cc index 68f3a546b..79223d155 100644 --- a/src/libfetchers/mounted-source-accessor.cc +++ b/src/libutil/mounted-source-accessor.cc @@ -1,4 +1,4 @@ -#include "mounted-source-accessor.hh" +#include "source-accessor.hh" namespace nix { @@ -23,12 +23,6 @@ struct MountedSourceAccessor : SourceAccessor return accessor->readFile(subpath); } - bool pathExists(const CanonPath & path) override - { - auto [accessor, subpath] = resolve(path); - return accessor->pathExists(subpath); - } - std::optional maybeLstat(const CanonPath & path) override { auto [accessor, subpath] = resolve(path); @@ -69,6 +63,12 @@ struct MountedSourceAccessor : SourceAccessor path.pop(); } } + + std::optional getPhysicalPath(const CanonPath & path) override + { + auto [accessor, subpath] = resolve(path); + return accessor->getPhysicalPath(subpath); + } }; ref makeMountedSourceAccessor(std::map> mounts) diff --git a/src/libutil/package.nix b/src/libutil/package.nix index 2f19b5822..a0b80ade7 100644 --- a/src/libutil/package.nix +++ b/src/libutil/package.nix @@ -54,17 +54,6 @@ mkMesonLibrary (finalAttrs: { nlohmann_json ]; - preConfigure = - # "Inline" .version so it's not a symlink, and includes the suffix. - # Do the meson utils, without modification. - # - # TODO: change release process to add `pre` in `.version`, remove it - # before tagging, and restore after. - '' - chmod u+w ./.version - echo ${version} > ../../.version - ''; - mesonFlags = [ (lib.mesonEnable "cpuid" stdenv.hostPlatform.isx86_64) ]; diff --git a/src/libutil/source-accessor.hh b/src/libutil/source-accessor.hh index 42af8256a..79ae092ac 100644 --- a/src/libutil/source-accessor.hh +++ b/src/libutil/source-accessor.hh @@ -214,4 +214,12 @@ ref getFSSourceAccessor(); */ ref makeFSSourceAccessor(std::filesystem::path root); +ref makeMountedSourceAccessor(std::map> mounts); + +/** + * Construct an accessor that presents a "union" view of a vector of + * underlying accessors. Earlier accessors take precedence over later. 
+ */ +ref makeUnionSourceAccessor(std::vector> && accessors); + } diff --git a/src/libutil/union-source-accessor.cc b/src/libutil/union-source-accessor.cc new file mode 100644 index 000000000..eec0850c2 --- /dev/null +++ b/src/libutil/union-source-accessor.cc @@ -0,0 +1,82 @@ +#include "source-accessor.hh" + +namespace nix { + +struct UnionSourceAccessor : SourceAccessor +{ + std::vector> accessors; + + UnionSourceAccessor(std::vector> _accessors) + : accessors(std::move(_accessors)) + { + displayPrefix.clear(); + } + + std::string readFile(const CanonPath & path) override + { + for (auto & accessor : accessors) { + auto st = accessor->maybeLstat(path); + if (st) + return accessor->readFile(path); + } + throw FileNotFound("path '%s' does not exist", showPath(path)); + } + + std::optional maybeLstat(const CanonPath & path) override + { + for (auto & accessor : accessors) { + auto st = accessor->maybeLstat(path); + if (st) + return st; + } + return std::nullopt; + } + + DirEntries readDirectory(const CanonPath & path) override + { + DirEntries result; + for (auto & accessor : accessors) { + auto st = accessor->maybeLstat(path); + if (!st) + continue; + for (auto & entry : accessor->readDirectory(path)) + // Don't override entries from previous accessors. + result.insert(entry); + } + return result; + } + + std::string readLink(const CanonPath & path) override + { + for (auto & accessor : accessors) { + auto st = accessor->maybeLstat(path); + if (st) + return accessor->readLink(path); + } + throw FileNotFound("path '%s' does not exist", showPath(path)); + } + + std::string showPath(const CanonPath & path) override + { + for (auto & accessor : accessors) + return accessor->showPath(path); + return SourceAccessor::showPath(path); + } + + std::optional getPhysicalPath(const CanonPath & path) override + { + for (auto & accessor : accessors) { + auto p = accessor->getPhysicalPath(path); + if (p) + return p; + } + return std::nullopt; + } +}; + +ref makeUnionSourceAccessor(std::vector> && accessors) +{ + return make_ref(std::move(accessors)); +} + +} diff --git a/src/libutil/unix/processes.cc b/src/libutil/unix/processes.cc index 43d9179d9..a9df1be60 100644 --- a/src/libutil/unix/processes.cc +++ b/src/libutil/unix/processes.cc @@ -200,8 +200,15 @@ static int childEntry(void * arg) pid_t startProcess(std::function fun, const ProcessOptions & options) { ChildWrapperFunction wrapper = [&] { - if (!options.allowVfork) + if (!options.allowVfork) { + /* Set a simple logger, while releasing (not destroying) + the parent logger. We don't want to run the parent + logger's destructor since that will crash (e.g. when + ~ProgressBar() tries to join a thread that doesn't + exist. 
*/ + logger.release(); logger = makeSimpleLogger(); + } try { #if __linux__ if (options.dieWithParent && prctl(PR_SET_PDEATHSIG, SIGKILL) == -1) diff --git a/src/nix-build/nix-build.cc b/src/nix-build/nix-build.cc index de01e1afc..a5ae12a12 100644 --- a/src/nix-build/nix-build.cc +++ b/src/nix-build/nix-build.cc @@ -11,6 +11,7 @@ #include "current-process.hh" #include "parsed-derivations.hh" +#include "derivation-options.hh" #include "store-api.hh" #include "local-fs-store.hh" #include "globals.hh" @@ -27,6 +28,7 @@ #include "users.hh" #include "network-proxy.hh" #include "compatibility-settings.hh" +#include "man-pages.hh" using namespace nix; using namespace std::string_literals; @@ -542,12 +544,13 @@ static void main_nix_build(int argc, char * * argv) env["NIX_STORE"] = store->storeDir; env["NIX_BUILD_CORES"] = std::to_string(settings.buildCores); - auto passAsFile = tokenizeString(getOr(drv.env, "passAsFile", "")); + ParsedDerivation parsedDrv(packageInfo.requireDrvPath(), drv); + DerivationOptions drvOptions = DerivationOptions::fromParsedDerivation(parsedDrv); int fileNr = 0; for (auto & var : drv.env) - if (passAsFile.count(var.first)) { + if (drvOptions.passAsFile.count(var.first)) { auto fn = ".attr-" + std::to_string(fileNr++); Path p = (tmpDir.path() / fn).string(); writeFile(p, var.second); @@ -557,7 +560,7 @@ static void main_nix_build(int argc, char * * argv) std::string structuredAttrsRC; - if (env.count("__json")) { + if (parsedDrv.hasStructuredAttrs()) { StorePathSet inputs; std::function::ChildNode &)> accumInputClosure; @@ -575,8 +578,6 @@ static void main_nix_build(int argc, char * * argv) for (const auto & [inputDrv, inputNode] : drv.inputDrvs.map) accumInputClosure(inputDrv, inputNode); - ParsedDerivation parsedDrv(packageInfo.requireDrvPath(), drv); - if (auto structAttrs = parsedDrv.prepareStructuredAttrs(*store, inputs)) { auto json = structAttrs.value(); structuredAttrsRC = writeStructuredAttrsShell(json); diff --git a/src/nix-channel/nix-channel.cc b/src/nix-channel/nix-channel.cc index 56d1d7abb..ee61db994 100644 --- a/src/nix-channel/nix-channel.cc +++ b/src/nix-channel/nix-channel.cc @@ -8,6 +8,7 @@ #include "users.hh" #include "tarball.hh" #include "self-exe.hh" +#include "man-pages.hh" #include #include diff --git a/src/nix-collect-garbage/nix-collect-garbage.cc b/src/nix-collect-garbage/nix-collect-garbage.cc index 20d5161df..a060a01fd 100644 --- a/src/nix-collect-garbage/nix-collect-garbage.cc +++ b/src/nix-collect-garbage/nix-collect-garbage.cc @@ -7,6 +7,7 @@ #include "shared.hh" #include "globals.hh" #include "legacy.hh" +#include "man-pages.hh" #include #include diff --git a/src/nix-copy-closure/nix-copy-closure.cc b/src/nix-copy-closure/nix-copy-closure.cc index b64af758f..15bff0a0a 100644 --- a/src/nix-copy-closure/nix-copy-closure.cc +++ b/src/nix-copy-closure/nix-copy-closure.cc @@ -2,6 +2,7 @@ #include "realisation.hh" #include "store-api.hh" #include "legacy.hh" +#include "man-pages.hh" using namespace nix; diff --git a/src/nix-env/nix-env.cc b/src/nix-env/nix-env.cc index c99c1088e..aa1edb4c8 100644 --- a/src/nix-env/nix-env.cc +++ b/src/nix-env/nix-env.cc @@ -17,6 +17,7 @@ #include "legacy.hh" #include "eval-settings.hh" // for defexpr #include "terminal.hh" +#include "man-pages.hh" #include #include diff --git a/src/nix-instantiate/nix-instantiate.cc b/src/nix-instantiate/nix-instantiate.cc index 09d354832..0cf926369 100644 --- a/src/nix-instantiate/nix-instantiate.cc +++ b/src/nix-instantiate/nix-instantiate.cc @@ -12,6 +12,7 @@ #include 
"local-fs-store.hh" #include "common-eval-args.hh" #include "legacy.hh" +#include "man-pages.hh" #include #include diff --git a/src/nix-store/nix-store.cc b/src/nix-store/nix-store.cc index 89eaf3584..d182b1eee 100644 --- a/src/nix-store/nix-store.cc +++ b/src/nix-store/nix-store.cc @@ -12,6 +12,7 @@ #include "legacy.hh" #include "posix-source-accessor.hh" #include "path-with-outputs.hh" +#include "man-pages.hh" #ifndef _WIN32 // TODO implement on Windows or provide allowed-to-noop interface # include "local-store.hh" diff --git a/src/nix/flake.cc b/src/nix/flake.cc index 4b32020af..e2099c401 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -216,7 +216,7 @@ struct CmdFlakeMetadata : FlakeCommand, MixJSON auto & flake = lockedFlake.flake; // Currently, all flakes are in the Nix store via the rootFS accessor. - auto storePath = store->printStorePath(sourcePathToStorePath(store, flake.path).first); + auto storePath = store->printStorePath(store->toStorePath(flake.path.path.abs()).first); if (json) { nlohmann::json j; @@ -1079,7 +1079,7 @@ struct CmdFlakeArchive : FlakeCommand, MixJSON, MixDryRun StorePathSet sources; - auto storePath = sourcePathToStorePath(store, flake.flake.path).first; + auto storePath = store->toStorePath(flake.flake.path.path.abs()).first; sources.insert(storePath); @@ -1090,19 +1090,21 @@ struct CmdFlakeArchive : FlakeCommand, MixJSON, MixDryRun nlohmann::json jsonObj2 = json ? json::object() : nlohmann::json(nullptr); for (auto & [inputName, input] : node.inputs) { if (auto inputNode = std::get_if<0>(&input)) { - auto storePath = - dryRun - ? (*inputNode)->lockedRef.input.computeStorePath(*store) - : (*inputNode)->lockedRef.input.fetchToStore(store).first; - if (json) { - auto& jsonObj3 = jsonObj2[inputName]; - jsonObj3["path"] = store->printStorePath(storePath); - sources.insert(std::move(storePath)); - jsonObj3["inputs"] = traverse(**inputNode); - } else { - sources.insert(std::move(storePath)); - traverse(**inputNode); + std::optional storePath; + if (!(*inputNode)->lockedRef.input.isRelative()) { + storePath = + dryRun + ? (*inputNode)->lockedRef.input.computeStorePath(*store) + : (*inputNode)->lockedRef.input.fetchToStore(store).first; + sources.insert(*storePath); } + if (json) { + auto & jsonObj3 = jsonObj2[inputName]; + if (storePath) + jsonObj3["path"] = store->printStorePath(*storePath); + jsonObj3["inputs"] = traverse(**inputNode); + } else + traverse(**inputNode); } } return jsonObj2; diff --git a/src/nix/hash.cc b/src/nix/hash.cc index eac421d12..91bba47f4 100644 --- a/src/nix/hash.cc +++ b/src/nix/hash.cc @@ -8,6 +8,7 @@ #include "git.hh" #include "posix-source-accessor.hh" #include "misc-store-flags.hh" +#include "man-pages.hh" using namespace nix; diff --git a/src/nix/main.cc b/src/nix/main.cc index 80ef53084..c5e9c0e7f 100644 --- a/src/nix/main.cc +++ b/src/nix/main.cc @@ -388,8 +388,6 @@ void mainWrapped(int argc, char * * argv) } #endif - Finally f([] { logger->stop(); }); - programPath = argv[0]; auto programName = std::string(baseNameOf(programPath)); auto extensionPos = programName.find_last_of("."); @@ -557,6 +555,8 @@ void mainWrapped(int argc, char * * argv) int main(int argc, char * * argv) { + // The CLI has a more detailed version than the libraries; see nixVersion. + nix::nixVersion = NIX_CLI_VERSION; #ifndef _WIN32 // Increase the default stack size for the evaluator and for // libstdc++'s std::regex. 
diff --git a/src/nix/man-pages.cc b/src/nix/man-pages.cc
new file mode 100644
index 000000000..e9e89bb62
--- /dev/null
+++ b/src/nix/man-pages.cc
@@ -0,0 +1,29 @@
+#include "man-pages.hh"
+#include "file-system.hh"
+#include "current-process.hh"
+#include "environment-variables.hh"
+
+namespace nix {
+
+std::filesystem::path getNixManDir()
+{
+ return canonPath(NIX_MAN_DIR);
+}
+
+void showManPage(const std::string & name)
+{
+ restoreProcessContext();
+ setEnv("MANPATH", (getNixManDir().string() + ":").c_str());
+ execlp("man", "man", name.c_str(), nullptr);
+ if (errno == ENOENT) {
+ // Not SysError because we don't want to suffix the errno, aka No such file or directory.
+ throw Error(
+ "The '%1%' command was not found, but it is needed for '%2%' and some other '%3%' commands' help text. Perhaps you could install the '%1%' command?",
+ "man",
+ name.c_str(),
+ "nix-*");
+ }
+ throw SysError("command 'man %1%' failed", name.c_str());
+}
+
+}
diff --git a/src/nix/man-pages.hh b/src/nix/man-pages.hh
new file mode 100644
index 000000000..9ba035af8
--- /dev/null
+++ b/src/nix/man-pages.hh
@@ -0,0 +1,28 @@
+#pragma once
+///@file
+
+#include <filesystem>
+#include <string>
+
+namespace nix {
+
+/**
+ * @brief Get path to the nix manual dir.
+ *
+ * Nix relies on the man pages being available at NIX_MAN_DIR for
+ * displaying help messages for the legacy CLI.
+ *
+ * NIX_MAN_DIR is a compile-time parameter, so man pages are unlikely to work
+ * when the nix executable is installed out-of-store or as a static binary.
+ *
+ */
+std::filesystem::path getNixManDir();
+
+/**
+ * Show the manual page for the specified program.
+ *
+ * @param name Name of the man item.
+ */
+void showManPage(const std::string & name);
+
+}
diff --git a/src/nix/meson.build b/src/nix/meson.build
index 2698cc873..398750498 100644
--- a/src/nix/meson.build
+++ b/src/nix/meson.build
@@ -35,6 +35,9 @@ subdir('nix-meson-build-support/windows-version')
configdata = configuration_data()
+# The CLI has a more detailed version string than the libraries; see `nixVersion`
+configdata.set_quoted('NIX_CLI_VERSION', meson.project_version())
+
fs = import('fs')
bindir = get_option('bindir')
@@ -90,6 +93,7 @@ nix_sources = [config_h] + files(
'ls.cc',
'main.cc',
'make-content-addressed.cc',
+ 'man-pages.cc',
'nar.cc',
'optimise-store.cc',
'path-from-hash-part.cc',
@@ -182,6 +186,16 @@ if host_machine.system() != 'windows'
]
endif
+fs = import('fs')
+prefix = get_option('prefix')
+
+mandir = get_option('mandir')
+mandir = fs.is_absolute(mandir) ? mandir : prefix / mandir
+
+cpp_args = [
+ '-DNIX_MAN_DIR="@0@"'.format(mandir)
+]
+
include_dirs = [include_directories('.')]
this_exe = executable(
@@ -189,6 +203,7 @@ this_exe = executable(
sources,
dependencies : deps_private_subproject + deps_private + deps_other,
include_directories : include_dirs,
+ cpp_args : cpp_args,
link_args: linker_export_flags,
install : true,
)
@@ -223,7 +238,7 @@ foreach linkname : nix_symlinks
# The 'runtime' tag is what executables default to, which we want to emulate here.
install_tag : 'runtime' ) - t = custom_target( + custom_target( command: ['ln', '-sf', fs.name(this_exe), '@OUTPUT@'], output: linkname + executable_suffix, # native doesn't allow dangling symlinks, but the target executable often doesn't exist at this time diff --git a/src/nix/package.nix b/src/nix/package.nix index 6e59adc38..40a280437 100644 --- a/src/nix/package.nix +++ b/src/nix/package.nix @@ -91,14 +91,6 @@ mkMesonExecutable (finalAttrs: { nix-cmd ]; - preConfigure = - # "Inline" .version so it's not a symlink, and includes the suffix. - # Do the meson utils, without modification. - '' - chmod u+w ./.version - echo ${version} > ../../../.version - ''; - mesonFlags = [ ]; diff --git a/src/nix/prefetch.cc b/src/nix/prefetch.cc index db7d9e4ef..84c0224e2 100644 --- a/src/nix/prefetch.cc +++ b/src/nix/prefetch.cc @@ -12,6 +12,7 @@ #include "posix-source-accessor.hh" #include "misc-store-flags.hh" #include "terminal.hh" +#include "man-pages.hh" #include diff --git a/src/nix/unix/daemon.cc b/src/nix/unix/daemon.cc index 746963a01..b4c7c10ed 100644 --- a/src/nix/unix/daemon.cc +++ b/src/nix/unix/daemon.cc @@ -15,6 +15,7 @@ #include "finally.hh" #include "legacy.hh" #include "daemon.hh" +#include "man-pages.hh" #include #include diff --git a/src/perl/lib/Nix/Store.xs b/src/perl/lib/Nix/Store.xs index 172c3500d..cfc3ac034 100644 --- a/src/perl/lib/Nix/Store.xs +++ b/src/perl/lib/Nix/Store.xs @@ -194,7 +194,7 @@ StoreWrapper::computeFSClosure(int flipDirection, int includeOutputs, ...) PPCODE: try { StorePathSet paths; - for (int n = 2; n < items; ++n) + for (int n = 3; n < items; ++n) THIS->store->computeFSClosure(THIS->store->parseStorePath(SvPV_nolen(ST(n))), paths, flipDirection, includeOutputs); for (auto & i : paths) XPUSHs(sv_2mortal(newSVpv(THIS->store->printStorePath(i).c_str(), 0))); @@ -208,7 +208,7 @@ StoreWrapper::topoSortPaths(...) PPCODE: try { StorePathSet paths; - for (int n = 0; n < items; ++n) paths.insert(THIS->store->parseStorePath(SvPV_nolen(ST(n)))); + for (int n = 1; n < items; ++n) paths.insert(THIS->store->parseStorePath(SvPV_nolen(ST(n)))); auto sorted = THIS->store->topoSortPaths(paths); for (auto & i : sorted) XPUSHs(sv_2mortal(newSVpv(THIS->store->printStorePath(i).c_str(), 0))); @@ -234,7 +234,7 @@ StoreWrapper::exportPaths(int fd, ...) 
PPCODE:
try {
StorePathSet paths;
- for (int n = 1; n < items; ++n) paths.insert(THIS->store->parseStorePath(SvPV_nolen(ST(n))));
+ for (int n = 2; n < items; ++n) paths.insert(THIS->store->parseStorePath(SvPV_nolen(ST(n))));
FdSink sink(fd);
THIS->store->exportPaths(paths, sink);
} catch (Error & e) {
diff --git a/src/perl/meson.build b/src/perl/meson.build
index 7b3716c17..599e91710 100644
--- a/src/perl/meson.build
+++ b/src/perl/meson.build
@@ -57,10 +57,10 @@ libdir = join_paths(prefix, get_option('libdir'))
# Required Programs
#-------------------------------------------------
-xz = find_program('xz')
+find_program('xz')
xsubpp = find_program('xsubpp')
perl = find_program('perl')
-curl = find_program('curl')
+find_program('curl')
yath = find_program('yath', required : false)
# Required Libraries
@@ -157,7 +157,7 @@ subdir(lib_dir)
if get_option('tests').enabled()
yath_rc_conf = configuration_data()
yath_rc_conf.set('lib_dir', lib_dir)
- yath_rc = configure_file(
+ configure_file(
output : '.yath.rc',
input : '.yath.rc.in',
configuration : yath_rc_conf,
diff --git a/tests/functional/chroot-store.sh b/tests/functional/chroot-store.sh
index ccde3e90b..7300f04ba 100755
--- a/tests/functional/chroot-store.sh
+++ b/tests/functional/chroot-store.sh
@@ -2,6 +2,28 @@ source common.sh
+# Regression test for #11503.
+mkdir -p "$TEST_ROOT/directory"
+cat > "$TEST_ROOT/directory/default.nix" < "$TEST_ROOT"/example.txt
mkdir -p "$TEST_ROOT/x"
diff --git a/tests/functional/common/functions.sh b/tests/functional/common/functions.sh
index bf3dd2ca8..1b2ec8fe0 100644
--- a/tests/functional/common/functions.sh
+++ b/tests/functional/common/functions.sh
@@ -67,7 +67,7 @@ startDaemon() {
die "startDaemon: not supported when testing on NixOS. Is it really needed? If so add conditionals; e.g. if ! isTestOnNixOS; then ..."
fi
- # Don’t start the daemon twice, as this would just make it loop indefinitely
+ # Don't start the daemon twice, as this would just make it loop indefinitely.
if [[ "${_NIX_TEST_DAEMON_PID-}" != '' ]]; then
return
fi
@@ -76,15 +76,19 @@ startDaemon() {
PATH=$DAEMON_PATH nix --extra-experimental-features 'nix-command' daemon &
_NIX_TEST_DAEMON_PID=$!
export _NIX_TEST_DAEMON_PID
- for ((i = 0; i < 300; i++)); do
+ for ((i = 0; i < 60; i++)); do
if [[ -S $NIX_DAEMON_SOCKET_PATH ]]; then
DAEMON_STARTED=1
break;
fi
+ if ! kill -0 "$_NIX_TEST_DAEMON_PID"; then
+ echo "daemon died unexpectedly" >&2
+ exit 1
+ fi
sleep 0.1
done
if [[ -z ${DAEMON_STARTED+x} ]]; then
- fail "Didn’t manage to start the daemon"
+ fail "Didn't manage to start the daemon"
fi
trap "killDaemon" EXIT # Save for if daemon is killed
@@ -97,7 +101,7 @@ killDaemon() {
die "killDaemon: not supported when testing on NixOS. Is it really needed? If so add conditionals; e.g. if ! isTestOnNixOS; then ..."
fi
- # Don’t fail trying to stop a non-existant daemon twice
+ # Don't fail trying to stop a non-existent daemon twice.
if [[ "${_NIX_TEST_DAEMON_PID-}" == '' ]]; then return fi @@ -219,7 +223,7 @@ assertStderr() { needLocalStore() { if [[ "$NIX_REMOTE" == "daemon" ]]; then - skipTest "Can’t run through the daemon ($1)" + skipTest "Can't run through the daemon ($1)" fi } diff --git a/tests/functional/dyn-drv/build-built-drv.sh b/tests/functional/dyn-drv/build-built-drv.sh index 647be9457..fcb25a34b 100644 --- a/tests/functional/dyn-drv/build-built-drv.sh +++ b/tests/functional/dyn-drv/build-built-drv.sh @@ -18,4 +18,9 @@ clearStore drvDep=$(nix-instantiate ./text-hashed-output.nix -A producingDrv) -expectStderr 1 nix build "${drvDep}^out^out" --no-link | grepQuiet "Building dynamic derivations in one shot is not yet implemented" +# Store layer needs bugfix +requireDaemonNewerThan "2.27pre20250205" + +out2=$(nix build "${drvDep}^out^out" --no-link) + +test $out1 == $out2 diff --git a/tests/functional/dyn-drv/dep-built-drv-2.sh b/tests/functional/dyn-drv/dep-built-drv-2.sh new file mode 100644 index 000000000..531af6bf7 --- /dev/null +++ b/tests/functional/dyn-drv/dep-built-drv-2.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash + +source common.sh + +# Store layer needs bugfix +requireDaemonNewerThan "2.27pre20250205" + +TODO_NixOS # can't enable a sandbox feature easily + +enableFeatures 'recursive-nix' +restartDaemon + +NIX_BIN_DIR="$(dirname "$(type -p nix)")" +export NIX_BIN_DIR + +nix build -L --file ./non-trivial.nix --no-link diff --git a/tests/functional/dyn-drv/dep-built-drv.sh b/tests/functional/dyn-drv/dep-built-drv.sh index 4f6e9b080..9d470099a 100644 --- a/tests/functional/dyn-drv/dep-built-drv.sh +++ b/tests/functional/dyn-drv/dep-built-drv.sh @@ -4,8 +4,11 @@ source common.sh out1=$(nix-build ./text-hashed-output.nix -A hello --no-out-link) +# Store layer needs bugfix +requireDaemonNewerThan "2.27pre20250205" + clearStore -expectStderr 1 nix-build ./text-hashed-output.nix -A wrapper --no-out-link | grepQuiet "Building dynamic derivations in one shot is not yet implemented" +out2=$(nix-build ./text-hashed-output.nix -A wrapper --no-out-link) -# diff -r $out1 $out2 +diff -r $out1 $out2 diff --git a/tests/functional/dyn-drv/failing-outer.sh b/tests/functional/dyn-drv/failing-outer.sh new file mode 100644 index 000000000..d888ea876 --- /dev/null +++ b/tests/functional/dyn-drv/failing-outer.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +source common.sh + +# Store layer needs bugfix +requireDaemonNewerThan "2.27pre20250205" + +expected=100 +if [[ -v NIX_DAEMON_PACKAGE ]]; then expected=1; fi # work around the daemon not returning a 100 status correctly + +expectStderr "$expected" nix-build ./text-hashed-output.nix -A failingWrapper --no-out-link \ + | grepQuiet "build of '.*use-dynamic-drv-in-non-dynamic-drv-wrong.drv' failed" diff --git a/tests/functional/dyn-drv/meson.build b/tests/functional/dyn-drv/meson.build index 5b60a4698..07145000d 100644 --- a/tests/functional/dyn-drv/meson.build +++ b/tests/functional/dyn-drv/meson.build @@ -12,8 +12,10 @@ suites += { 'recursive-mod-json.sh', 'build-built-drv.sh', 'eval-outputOf.sh', + 'failing-outer.sh', 'dep-built-drv.sh', 'old-daemon-error-hack.sh', + 'dep-built-drv-2.sh', ], 'workdir': meson.current_source_dir(), } diff --git a/tests/functional/dyn-drv/non-trivial.nix b/tests/functional/dyn-drv/non-trivial.nix new file mode 100644 index 000000000..5cfafbb62 --- /dev/null +++ b/tests/functional/dyn-drv/non-trivial.nix @@ -0,0 +1,77 @@ +with import ./config.nix; + +builtins.outputOf + (mkDerivation { + name = "make-derivations.drv"; + + requiredSystemFeatures = [ 
"recursive-nix" ]; + + buildCommand = '' + set -e + set -u + + PATH=${builtins.getEnv "NIX_BIN_DIR"}:$PATH + + export NIX_CONFIG='extra-experimental-features = nix-command ca-derivations dynamic-derivations' + + declare -A deps=( + [a]="" + [b]="a" + [c]="a" + [d]="b c" + [e]="b c d" + ) + + # Cannot just literally include this, or Nix will think it is the + # *outer* derivation that's trying to refer to itself, and + # substitute the string too soon. + placeholder=$(nix eval --raw --expr 'builtins.placeholder "out"') + + declare -A drvs=() + for word in a b c d e; do + inputDrvs="" + for dep in ''${deps[$word]}; do + if [[ "$inputDrvs" != "" ]]; then + inputDrvs+="," + fi + read -r -d "" line <> \"\$out\""], + "builder": "${shell}", + "env": { + "out": "$placeholder", + "$word": "hello, from $word!", + "PATH": ${builtins.toJSON path} + }, + "inputDrvs": { + $inputDrvs + }, + "inputSrcs": [], + "name": "build-$word", + "outputs": { + "out": { + "method": "nar", + "hashAlgo": "sha256" + } + }, + "system": "${system}" + } + EOF + drvs[$word]="$(echo "$json" | nix derivation add)" + done + cp "''${drvs[e]}" $out + ''; + + __contentAddressed = true; + outputHashMode = "text"; + outputHashAlgo = "sha256"; + }).outPath + "out" diff --git a/tests/functional/dyn-drv/text-hashed-output.nix b/tests/functional/dyn-drv/text-hashed-output.nix index 65d7ab35a..59261bbbf 100644 --- a/tests/functional/dyn-drv/text-hashed-output.nix +++ b/tests/functional/dyn-drv/text-hashed-output.nix @@ -13,6 +13,7 @@ rec { echo "Hello World" > $out/hello ''; }; + producingDrv = mkDerivation { name = "hello.drv"; buildCommand = '' @@ -23,6 +24,7 @@ rec { outputHashMode = "text"; outputHashAlgo = "sha256"; }; + wrapper = mkDerivation { name = "use-dynamic-drv-in-non-dynamic-drv"; buildCommand = '' @@ -30,4 +32,12 @@ rec { cp -r ${builtins.outputOf producingDrv.outPath "out"} $out ''; }; + + failingWrapper = mkDerivation { + name = "use-dynamic-drv-in-non-dynamic-drv-wrong"; + buildCommand = '' + echo "Fail at copying the output of the dynamic derivation" + fail ${builtins.outputOf producingDrv.outPath "out"} $out + ''; + }; } diff --git a/tests/functional/flakes/common.sh b/tests/functional/flakes/common.sh index b1c3988e3..06e414e9d 100644 --- a/tests/functional/flakes/common.sh +++ b/tests/functional/flakes/common.sh @@ -99,6 +99,16 @@ writeTrivialFlake() { EOF } +initGitRepo() { + local repo="$1" + local extraArgs="${2-}" + + # shellcheck disable=SC2086 # word splitting of extraArgs is intended + git -C "$repo" init $extraArgs + git -C "$repo" config user.email "foobar@example.com" + git -C "$repo" config user.name "Foobar" +} + createGitRepo() { local repo="$1" local extraArgs="${2-}" @@ -107,7 +117,5 @@ createGitRepo() { mkdir -p "$repo" # shellcheck disable=SC2086 # word splitting of extraArgs is intended - git -C "$repo" init $extraArgs - git -C "$repo" config user.email "foobar@example.com" - git -C "$repo" config user.name "Foobar" + initGitRepo "$repo" $extraArgs } diff --git a/tests/functional/flakes/relative-paths.sh b/tests/functional/flakes/relative-paths.sh index 9b93da9c1..3f7ca3f46 100644 --- a/tests/functional/flakes/relative-paths.sh +++ b/tests/functional/flakes/relative-paths.sh @@ -45,7 +45,7 @@ EOF [[ $(nix eval "$rootFlake?dir=sub1#y") = 6 ]] -git init "$rootFlake" +initGitRepo "$rootFlake" git -C "$rootFlake" add flake.nix sub0/flake.nix sub1/flake.nix [[ $(nix eval "$subflake1#y") = 6 ]] @@ -76,6 +76,19 @@ if ! isTestOnNixOS; then fi (! 
grep narHash "$subflake2/flake.lock")
+# Test `nix flake archive` with relative path flakes.
+git -C "$rootFlake" add flake.lock
+git -C "$rootFlake" commit -a -m Foo
+
+json=$(nix flake archive --json "$rootFlake" --to "$TEST_ROOT/store2")
+[[ $(echo "$json" | jq .inputs.sub0.inputs) = {} ]]
+[[ -n $(echo "$json" | jq .path) ]]
+
+nix flake prefetch --out-link "$TEST_ROOT/result" "$rootFlake"
+outPath=$(readlink "$TEST_ROOT/result")
+
+[ -e "$TEST_ROOT/store2/nix/store/$(basename "$outPath")" ]
+
# Test circular relative path flakes. FIXME: doesn't work at the moment.
if false; then
diff --git a/tests/functional/meson.build b/tests/functional/meson.build
index 3342ee870..af95879fb 100644
--- a/tests/functional/meson.build
+++ b/tests/functional/meson.build
@@ -37,7 +37,7 @@ test_confdata = {
# Done as a subdir() so Meson places it under `common` in the build directory as well.
subdir('common')
-config_nix_in = configure_file(
+configure_file(
input : 'config.nix.in',
output : 'config.nix',
configuration : test_confdata,
diff --git a/tests/functional/misc.sh b/tests/functional/misc.sh
index 7d63756b7..cb4d4139f 100755
--- a/tests/functional/misc.sh
+++ b/tests/functional/misc.sh
@@ -11,7 +11,7 @@ source common.sh
#nix-hash --help | grepQuiet base32
# Can we ask for the version number?
-nix-env --version | grep "$version"
+nix-env --version | grep -F "${_NIX_TEST_CLIENT_VERSION:-$version}"
nix_env=$(type -P nix-env)
(PATH=""; ! $nix_env --help 2>&1 ) | grepQuiet -F "The 'man' command was not found, but it is needed for 'nix-env' and some other 'nix-*' commands' help text. Perhaps you could install the 'man' command?"
diff --git a/tests/functional/package.nix b/tests/functional/package.nix
index 74c034196..a84ad1791 100644
--- a/tests/functional/package.nix
+++ b/tests/functional/package.nix
@@ -75,16 +75,10 @@ mkMesonDerivation (
];
preConfigure =
- # "Inline" .version so it's not a symlink, and includes the suffix.
- # Do the meson utils, without modification.
- ''
- chmod u+w ./.version
- echo ${version} > ../../../.version
- ''
# TEMP hack for Meson before make is gone, where
# `src/nix-functional-tests` is during the transition a symlink and not the actual directory.
- + '' + '' cd $(readlink -e $PWD) echo $PWD | grep tests/functional ''; @@ -105,6 +99,8 @@ mkMesonDerivation ( } // lib.optionalAttrs (test-daemon != null) { + # TODO rename to _NIX_TEST_DAEMON_PACKAGE NIX_DAEMON_PACKAGE = test-daemon; + _NIX_TEST_CLIENT_VERSION = nix-cli.version; } ) diff --git a/tests/nixos/fetch-git/test-cases/lfs/default.nix b/tests/nixos/fetch-git/test-cases/lfs/default.nix new file mode 100644 index 000000000..686796fcc --- /dev/null +++ b/tests/nixos/fetch-git/test-cases/lfs/default.nix @@ -0,0 +1,228 @@ +{ + # mostly copied from https://github.com/NixOS/nix/blob/358c26fd13a902d9a4032a00e6683571be07a384/tests/nixos/fetch-git/test-cases/fetchTree-shallow/default.nix#L1 + # ty @DavHau + description = "fetchGit smudges LFS pointers if lfs=true"; + script = '' + from tempfile import TemporaryDirectory + + expected_max_size_lfs_pointer = 1024 # 1 KiB (values >= than this cannot be pointers, and test files are 1 MiB) + + # purge nix git cache to make sure we start with a clean slate + client.succeed("rm -rf ~/.cache/nix") + + + with subtest("Request lfs fetch without any .gitattributes file"): + client.succeed(f"dd if=/dev/urandom of={repo.path}/regular bs=1M count=1 >&2") + client.succeed(f"{repo.git} add : >&2") + client.succeed(f"{repo.git} commit -m 'no .gitattributes' >&2") + client.succeed(f"{repo.git} push origin main >&2") + + # memorize the revision + no_gitattributes_rev = client.succeed(f"{repo.git} rev-parse HEAD").strip() + + # fetch with lfs=true, and check that the lack of .gitattributes does not break anything + fetchGit_no_gitattributes_expr = f""" + builtins.fetchGit {{ + url = "{repo.remote}"; + rev = "{no_gitattributes_rev}"; + ref = "main"; + lfs = true; + }} + """ + fetched_no_gitattributes = client.succeed(f""" + nix eval --debug --impure --raw --expr '({fetchGit_no_gitattributes_expr}).outPath' + """) + client.succeed(f"cmp {repo.path}/regular {fetched_no_gitattributes}/regular >&2") + + + with subtest("Add a file that should be tracked by lfs, but isn't"): + # (git lfs cli only throws a warning "Encountered 1 file that should have + # been a pointer, but wasn't") + + client.succeed(f"dd if=/dev/urandom of={repo.path}/black_sheep bs=1M count=1 >&2") + client.succeed(f"echo 'black_sheep filter=lfs -text' >>{repo.path}/.gitattributes") + client.succeed(f"{repo.git} add : >&2") + client.succeed(f"{repo.git} commit -m 'add misleading file' >&2") + client.succeed(f"{repo.git} push origin main >&2") + + # memorize the revision + bad_lfs_rev = client.succeed(f"{repo.git} rev-parse HEAD").strip() + + # test assumption that it can be cloned with regular git first + # (here we see the warning as stated above) + with TemporaryDirectory() as tempdir: + client.succeed(f"git clone -n {repo.remote} {tempdir} >&2") + client.succeed(f"git -C {tempdir} lfs install >&2") + client.succeed(f"git -C {tempdir} checkout {bad_lfs_rev} >&2") + + # check that the file is not a pointer, as expected + file_size_git = client.succeed(f"stat -c %s {tempdir}/black_sheep").strip() + assert int(file_size_git) == 1024 * 1024, \ + f"non lfs file is {file_size_git}b (!= 1MiB), probably a test implementation error" + + lfs_files = client.succeed(f"git -C {tempdir} lfs ls-files").strip() + assert lfs_files == "", "non lfs file is tracked by lfs, probably a test implementation error" + + client.succeed(f"cmp {repo.path}/black_sheep {tempdir}/black_sheep >&2") + + # now fetch without lfs, check that the file is not a pointer + fetchGit_bad_lfs_without_lfs_expr = f""" + builtins.fetchGit 
{{ + url = "{repo.remote}"; + rev = "{bad_lfs_rev}"; + ref = "main"; + lfs = false; + }} + """ + fetched_bad_lfs_without_lfs = client.succeed(f""" + nix eval --debug --impure --raw --expr '({fetchGit_bad_lfs_without_lfs_expr}).outPath' + """) + + # check that file was not somehow turned into a pointer + file_size_bad_lfs_without_lfs = client.succeed(f"stat -c %s {fetched_bad_lfs_without_lfs}/black_sheep").strip() + + assert int(file_size_bad_lfs_without_lfs) == 1024 * 1024, \ + f"non lfs-enrolled file is {file_size_bad_lfs_without_lfs}b (!= 1MiB), probably a test implementation error" + client.succeed(f"cmp {repo.path}/black_sheep {fetched_bad_lfs_without_lfs}/black_sheep >&2") + + # finally fetch with lfs=true, and check that the bad file does not break anything + fetchGit_bad_lfs_with_lfs_expr = f""" + builtins.fetchGit {{ + url = "{repo.remote}"; + rev = "{bad_lfs_rev}"; + ref = "main"; + lfs = true; + }} + """ + fetchGit_bad_lfs_with_lfs = client.succeed(f""" + nix eval --debug --impure --raw --expr '({fetchGit_bad_lfs_with_lfs_expr}).outPath' + """) + + client.succeed(f"cmp {repo.path}/black_sheep {fetchGit_bad_lfs_with_lfs}/black_sheep >&2") + + + with subtest("Add an lfs-enrolled file to the repo"): + client.succeed(f"dd if=/dev/urandom of={repo.path}/beeg bs=1M count=1 >&2") + client.succeed(f"{repo.git} lfs install >&2") + client.succeed(f"{repo.git} lfs track --filename beeg >&2") + client.succeed(f"{repo.git} add : >&2") + client.succeed(f"{repo.git} commit -m 'add lfs file' >&2") + client.succeed(f"{repo.git} push origin main >&2") + + # memorize the revision + lfs_file_rev = client.succeed(f"{repo.git} rev-parse HEAD").strip() + + # first fetch without lfs, check that we did not smudge the file + fetchGit_nolfs_expr = f""" + builtins.fetchGit {{ + url = "{repo.remote}"; + rev = "{lfs_file_rev}"; + ref = "main"; + lfs = false; + }} + """ + fetched_nolfs = client.succeed(f""" + nix eval --debug --impure --raw --expr '({fetchGit_nolfs_expr}).outPath' + """) + + # check that file was not smudged + file_size_nolfs = client.succeed(f"stat -c %s {fetched_nolfs}/beeg").strip() + + assert int(file_size_nolfs) < expected_max_size_lfs_pointer, \ + f"did not set lfs=true, yet lfs-enrolled file is {file_size_nolfs}b (>= 1KiB), probably smudged when we should not have" + + # now fetch with lfs=true and check that the file was smudged + fetchGit_lfs_expr = f""" + builtins.fetchGit {{ + url = "{repo.remote}"; + rev = "{lfs_file_rev}"; + ref = "main"; + lfs = true; + }} + """ + fetched_lfs = client.succeed(f""" + nix eval --debug --impure --raw --expr '({fetchGit_lfs_expr}).outPath' + """) + + assert fetched_lfs != fetched_nolfs, \ + f"fetching with and without lfs yielded the same store path {fetched_lfs}, fingerprinting error?" 
+ + # check that file was smudged + file_size_lfs = client.succeed(f"stat -c %s {fetched_lfs}/beeg").strip() + assert int(file_size_lfs) == 1024 * 1024, \ + f"set lfs=true, yet lfs-enrolled file is {file_size_lfs}b (!= 1MiB), probably did not smudge when we should have" + + + with subtest("Check that default is lfs=false"): + fetchGit_default_expr = f""" + builtins.fetchGit {{ + url = "{repo.remote}"; + rev = "{lfs_file_rev}"; + ref = "main"; + }} + """ + fetched_default = client.succeed(f""" + nix eval --debug --impure --raw --expr '({fetchGit_default_expr}).outPath' + """) + + # check that file was not smudged + file_size_default = client.succeed(f"stat -c %s {fetched_default}/beeg").strip() + + assert int(file_size_default) < expected_max_size_lfs_pointer, \ + f"did not set lfs, yet lfs-enrolled file is {file_size_default}b (>= 1KiB), probably bad default value" + + with subtest("Use as flake input"): + # May seem reduntant, but this has minor differences compared to raw + # fetchGit which caused failures before + with TemporaryDirectory() as tempdir: + client.succeed(f"mkdir -p {tempdir}") + client.succeed(f""" + printf '{{ + inputs = {{ + foo = {{ + url = "git+{repo.remote}?ref=main&rev={lfs_file_rev}&lfs=1"; + flake = false; + }}; + }}; + outputs = {{ foo, self }}: {{ inherit (foo) outPath; }}; + }}' >{tempdir}/flake.nix + """) + fetched_flake = client.succeed(f""" + nix eval --debug --raw {tempdir}#.outPath + """) + + assert fetched_lfs == fetched_flake, \ + f"fetching as flake input (store path {fetched_flake}) yielded a different result than using fetchGit (store path {fetched_lfs})" + + + with subtest("Check self.lfs"): + client.succeed(f""" + printf '{{ + inputs.self.lfs = true; + outputs = {{ self }}: {{ }}; + }}' >{repo.path}/flake.nix + """) + client.succeed(f"{repo.git} add : >&2") + client.succeed(f"{repo.git} commit -m 'add flake' >&2") + client.succeed(f"{repo.git} push origin main >&2") + + # memorize the revision + self_lfs_rev = client.succeed(f"{repo.git} rev-parse HEAD").strip() + + with TemporaryDirectory() as tempdir: + client.succeed(f"mkdir -p {tempdir}") + client.succeed(f""" + printf '{{ + inputs.foo = {{ + url = "git+{repo.remote}?ref=main&rev={self_lfs_rev}"; + }}; + outputs = {{ foo, self }}: {{ inherit (foo) outPath; }}; + }}' >{tempdir}/flake.nix + """) + fetched_self_lfs = client.succeed(f""" + nix eval --debug --raw {tempdir}#.outPath + """) + + client.succeed(f"cmp {repo.path}/beeg {fetched_self_lfs}/beeg >&2") + ''; +} diff --git a/tests/nixos/fetch-git/testsupport/gitea.nix b/tests/nixos/fetch-git/testsupport/gitea.nix index 9409acff7..e63182639 100644 --- a/tests/nixos/fetch-git/testsupport/gitea.nix +++ b/tests/nixos/fetch-git/testsupport/gitea.nix @@ -29,9 +29,16 @@ in { pkgs, ... }: { services.gitea.enable = true; - services.gitea.settings.service.DISABLE_REGISTRATION = true; - services.gitea.settings.log.LEVEL = "Info"; - services.gitea.settings.database.LOG_SQL = false; + services.gitea.lfs.enable = true; + services.gitea.settings = { + service.DISABLE_REGISTRATION = true; + server = { + DOMAIN = "gitea"; + HTTP_PORT = 3000; + }; + log.LEVEL = "Info"; + database.LOG_SQL = false; + }; services.openssh.enable = true; networking.firewall.allowedTCPPorts = [ 3000 ]; environment.systemPackages = [ @@ -54,7 +61,10 @@ in client = { pkgs, ... }: { - environment.systemPackages = [ pkgs.git ]; + environment.systemPackages = [ + pkgs.git + pkgs.git-lfs + ]; }; }; defaults =