
Merge remote-tracking branch 'upstream/master' into lfs

Leandro Reina, 2025-01-21 14:16:42 +01:00
commit 40a3007b7c
89 changed files with 904 additions and 459 deletions

View file

@@ -27,6 +27,7 @@ pull_request_rules:
         branches:
           - 2.18-maintenance
         labels:
+          - automatic backport
           - merge-queue

   - name: backport patches to 2.19
@@ -37,6 +38,7 @@ pull_request_rules:
         branches:
           - 2.19-maintenance
         labels:
+          - automatic backport
           - merge-queue

   - name: backport patches to 2.20
@@ -47,6 +49,7 @@ pull_request_rules:
         branches:
           - 2.20-maintenance
         labels:
+          - automatic backport
           - merge-queue

   - name: backport patches to 2.21
@@ -57,6 +60,7 @@ pull_request_rules:
         branches:
           - 2.21-maintenance
         labels:
+          - automatic backport
           - merge-queue

   - name: backport patches to 2.22
@@ -67,6 +71,7 @@ pull_request_rules:
         branches:
           - 2.22-maintenance
         labels:
+          - automatic backport
           - merge-queue

   - name: backport patches to 2.23
@@ -77,6 +82,7 @@ pull_request_rules:
         branches:
           - 2.23-maintenance
         labels:
+          - automatic backport
           - merge-queue

   - name: backport patches to 2.24
@@ -87,6 +93,7 @@ pull_request_rules:
         branches:
           - "2.24-maintenance"
         labels:
+          - automatic backport
           - merge-queue

   - name: backport patches to 2.25
@@ -97,4 +104,5 @@ pull_request_rules:
         branches:
           - "2.25-maintenance"
         labels:
+          - automatic backport
           - merge-queue

View file

@@ -0,0 +1,12 @@
+---
+synopsis: "Support for relative path inputs"
+prs: [10089]
+---
+
+Flakes can now refer to other flakes in the same repository using relative paths, e.g.
+```nix
+inputs.foo.url = "path:./foo";
+```
+uses the flake in the `foo` subdirectory of the referring flake. For more information, see the documentation on [the `path` flake input type](@docroot@/command-ref/new-cli/nix3-flake.md#path-fetcher).
+
+This feature required a change to the lock file format. Previous Nix versions cannot use lock files that contain locks for relative path inputs.
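
As a sketch of what this enables (the `foo` subdirectory, its own `flake.nix`, and the `packages` attribute below are illustrative assumptions, not part of this change), a top-level flake might consume a subflake like this:

```nix
{
  # Relative path input: resolved against this flake's own source tree,
  # not against the current working directory.
  inputs.foo.url = "path:./foo";

  outputs = { self, foo }: {
    # Re-export whatever the subflake provides; the attribute layout is illustrative.
    packages = foo.packages;
  };
}
```

Locking such a flake records the relative path in `flake.lock`, which is why the note above warns that older Nix versions reject the resulting lock file.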

View file

@@ -11,6 +11,7 @@
   [`--from-profile` *path*]
   [`--preserve-installed` | `-P`]
   [`--remove-all` | `-r`]
+  [`--priority` *priority*]

 # Description
@@ -61,6 +62,10 @@ The arguments *args* map to store paths in a number of possible ways:
   The derivations returned by those function calls are installed.
   This allows derivations to be specified in an unambiguous way, which is necessary if there are multiple derivations with the same name.

+- If `--priority` *priority* is given, the priority of the derivations being installed is set to *priority*.
+  This overrides whatever priority they would otherwise have, which is useful if *args* are
+  [store paths], since store paths don't carry any priority information (see the sketch after this list).
+
 - If *args* are [store derivations](@docroot@/glossary.md#gloss-store-derivation), then these are [realised], and the resulting output paths are installed.

 - If *args* are [store paths] that are not store derivations, then these are [realised] and installed.
@@ -235,4 +240,3 @@ channel:

 ```console
 $ nix-env --file https://github.com/NixOS/nixpkgs/archive/nixos-14.12.tar.gz --install --attr firefox
 ```
-
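
A minimal usage sketch for the new flag; the store path below is hypothetical, and any realisable store path works the same way:

```console
$ nix-env --install --priority 5 /nix/store/<hash>-hello-2.12.1
```

Profiles treat a lower priority value as higher precedence, so this controls how the installed path competes with other packages when their files collide.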

View file

@@ -160,6 +160,6 @@ which you may remove.
 To remove a [single-user installation](./installing-binary.md#single-user-installation) of Nix, run:

 ```console
-$ rm -rf /nix ~/.nix-channels ~/.nix-defexpr ~/.nix-profile
+rm -rf /nix ~/.nix-channels ~/.nix-defexpr ~/.nix-profile
 ```
 You might also want to manually remove references to Nix from your `~/.profile`.

View file

@@ -107,6 +107,7 @@
     in {
       inherit stdenvs native;
       static = native.pkgsStatic;
+      llvm = native.pkgsLLVM;
       cross = forAllCrossSystems (crossSystem: make-pkgs crossSystem "stdenv");
     });
@@ -282,6 +283,7 @@
       # These attributes go right into `packages.<system>`.
       "${pkgName}" = nixpkgsFor.${system}.native.nixComponents.${pkgName};
       "${pkgName}-static" = nixpkgsFor.${system}.static.nixComponents.${pkgName};
+      "${pkgName}-llvm" = nixpkgsFor.${system}.llvm.nixComponents.${pkgName};
     }
     // lib.optionalAttrs supportsCross (flatMapAttrs (lib.genAttrs crossSystems (_: { })) (crossSystem: {}: {
       # These attributes go right into `packages.<system>`.
@@ -321,6 +323,9 @@
     prefixAttrs "static" (forAllStdenvs (stdenvName: makeShell {
       pkgs = nixpkgsFor.${system}.stdenvs."${stdenvName}Packages".pkgsStatic;
     })) //
+    prefixAttrs "llvm" (forAllStdenvs (stdenvName: makeShell {
+      pkgs = nixpkgsFor.${system}.stdenvs."${stdenvName}Packages".pkgsLLVM;
+    })) //
     prefixAttrs "cross" (forAllCrossSystems (crossSystem: makeShell {
       pkgs = nixpkgsFor.${system}.cross.${crossSystem};
     }))

View file

@@ -10,6 +10,27 @@
       # https://flake.parts/options/git-hooks-nix#options
       pre-commit.settings = {
         hooks = {
+          # Conflicts are usually found by other checks, but not those in docs,
+          # and potentially other places.
+          check-merge-conflicts.enable = true;
+          # built-in check-merge-conflicts seems ineffective against those produced by mergify backports
+          check-merge-conflicts-2 = {
+            enable = true;
+            entry = "${pkgs.writeScript "check-merge-conflicts" ''
+              #!${pkgs.runtimeShell}
+              conflicts=false
+              for file in "$@"; do
+                if grep --with-filename --line-number -E '^>>>>>>> ' -- "$file"; then
+                  conflicts=true
+                fi
+              done
+              if $conflicts; then
+                echo "ERROR: found merge/patch conflicts in files"
+                exit 1
+              fi
+              touch $out
+            ''}";
+          };
           clang-format = {
             enable = true;
             # https://github.com/cachix/git-hooks.nix/pull/532

View file

@@ -75,7 +75,11 @@ let
       # Users who are debugging Nix builds are expected to set the environment variable `mesonBuildType`, per the
       # guidance in https://github.com/NixOS/nix/blob/8a3fc27f1b63a08ac983ee46435a56cf49ebaf4a/doc/manual/source/development/debugging.md?plain=1#L10.
       # For this reason, we don't want to refer to `finalAttrs.mesonBuildType` here, but rather use the environment variable.
-      preConfigure = prevAttrs.preConfigure or "" + ''
+      preConfigure = prevAttrs.preConfigure or "" + lib.optionalString (
+        !stdenv.hostPlatform.isWindows
+        # build failure
+        && !stdenv.hostPlatform.isStatic
+      ) ''
         case "$mesonBuildType" in
         release|minsize) appendToVar mesonFlags "-Db_lto=true" ;;
         *) appendToVar mesonFlags "-Db_lto=false" ;;
@@ -97,6 +101,12 @@ let
       ];
       separateDebugInfo = !stdenv.hostPlatform.isStatic;
       hardeningDisable = lib.optional stdenv.hostPlatform.isStatic "pie";
+      env = prevAttrs.env or {}
+        // lib.optionalAttrs
+          (stdenv.isLinux
+            && !(stdenv.hostPlatform.isStatic && stdenv.system == "aarch64-linux")
+            && !(stdenv.hostPlatform.useLLVM or false))
+          { LDFLAGS = "-fuse-ld=gold"; };
     };

   mesonLibraryLayer = finalAttrs: prevAttrs:

View file

@@ -42,27 +42,35 @@
 }:

 let
+  libs = {
+    inherit
+      nix-util
+      nix-util-c
+      nix-store
+      nix-store-c
+      nix-fetchers
+      nix-expr
+      nix-expr-c
+      nix-flake
+      nix-flake-c
+      nix-main
+      nix-main-c
+      nix-cmd
+      ;
+  } // lib.optionalAttrs (!stdenv.hostPlatform.isStatic && stdenv.buildPlatform.canExecute stdenv.hostPlatform) {
+    # Currently fails in static build
+    inherit
+      nix-perl-bindings
+      ;
+  };
+
   dev = stdenv.mkDerivation (finalAttrs: {
     name = "nix-${nix-cli.version}-dev";
     pname = "nix";
     version = nix-cli.version;
     dontUnpack = true;
     dontBuild = true;
-    libs = map lib.getDev [
-      nix-cmd
-      nix-expr
-      nix-expr-c
-      nix-fetchers
-      nix-flake
-      nix-flake-c
-      nix-main
-      nix-main-c
-      nix-store
-      nix-store-c
-      nix-util
-      nix-util-c
-      nix-perl-bindings
-    ];
+    libs = map lib.getDev (lib.attrValues libs);
     installPhase = ''
       mkdir -p $out/nix-support
       echo $libs >> $out/nix-support/propagated-build-inputs
@@ -127,20 +135,16 @@ in
       nix-fetchers-tests.tests.run
       nix-flake-tests.tests.run

-      # Make sure the functional tests have passed
-      nix-functional-tests
-
       # dev bundle is ok
       # (checkInputs must be empty paths??)
       (runCommand "check-pkg-config" { checked = dev.tests.pkg-config; } "mkdir $out")
-    ] ++
-      (if stdenv.buildPlatform.canExecute stdenv.hostPlatform
-       then [
-         # TODO: add perl.tests
-         nix-perl-bindings
-       ]
-       else [
-         nix-perl-bindings
-       ]);
+    ] ++ lib.optionals (!stdenv.hostPlatform.isStatic && stdenv.buildPlatform.canExecute stdenv.hostPlatform) [
+      # Perl currently fails in static build
+      # TODO: Split out tests into a separate derivation?
+      nix-perl-bindings
+    ];
+    installCheckInputs = [
+      nix-functional-tests
+    ];
     passthru = prevAttrs.passthru // {
       inherit (nix-cli) version;
@@ -162,21 +166,7 @@ in
         disallowedReferences = nix.all;
         ```
       */
-      libs = {
-        inherit
-          nix-util
-          nix-util-c
-          nix-store
-          nix-store-c
-          nix-fetchers
-          nix-expr
-          nix-expr-c
-          nix-flake
-          nix-flake-c
-          nix-main
-          nix-main-c
-          ;
-      };
+      inherit libs;

       tests = prevAttrs.passthru.tests or {} // {
         # TODO: create a proper fixpoint and:

View file

@@ -145,13 +145,28 @@ poly_user_id_get() {
     dsclattr "/Users/$1" "UniqueID"
 }

+dscl_create() {
+    # workaround a bug in dscl where it sometimes fails with eNotYetImplemented:
+    # https://github.com/NixOS/nix/issues/12140
+    while ! _sudo "$1" /usr/bin/dscl . -create "$2" "$3" "$4" 2> "$SCRATCH/dscl.err"; do
+        local err=$?
+        if [[ $err -eq 140 ]] && grep -q "-14988 (eNotYetImplemented)" "$SCRATCH/dscl.err"; then
+            echo "dscl failed with eNotYetImplemented, retrying..."
+            sleep 1
+            continue
+        fi
+        cat "$SCRATCH/dscl.err"
+        return $err
+    done
+}
+
 poly_user_hidden_get() {
     dsclattr "/Users/$1" "IsHidden"
 }

 poly_user_hidden_set() {
-    _sudo "in order to make $1 a hidden user" \
-        /usr/bin/dscl . -create "/Users/$1" "IsHidden" "1"
+    dscl_create "in order to make $1 a hidden user" \
+        "/Users/$1" "IsHidden" "1"
 }

 poly_user_home_get() {
@@ -161,8 +176,8 @@ poly_user_home_get() {
 poly_user_home_set() {
     # This can trigger a permission prompt now:
     # "Terminal" would like to administer your computer. Administration can include modifying passwords, networking, and system settings.
-    _sudo "in order to give $1 a safe home directory" \
-        /usr/bin/dscl . -create "/Users/$1" "NFSHomeDirectory" "$2"
+    dscl_create "in order to give $1 a safe home directory" \
+        "/Users/$1" "NFSHomeDirectory" "$2"
 }

 poly_user_note_get() {
@@ -170,8 +185,8 @@ poly_user_note_get() {
 }

 poly_user_note_set() {
-    _sudo "in order to give $username a useful note" \
-        /usr/bin/dscl . -create "/Users/$1" "RealName" "$2"
+    dscl_create "in order to give $1 a useful note" \
+        "/Users/$1" "RealName" "$2"
 }

 poly_user_shell_get() {
@@ -179,8 +194,8 @@ poly_user_shell_get() {
 }

 poly_user_shell_set() {
-    _sudo "in order to give $1 a safe shell" \
-        /usr/bin/dscl . -create "/Users/$1" "UserShell" "$2"
+    dscl_create "in order to give $1 a safe shell" \
+        "/Users/$1" "UserShell" "$2"
 }

 poly_user_in_group_check() {

View file

@@ -562,7 +562,7 @@ create_build_user_for_core() {
     if [ "$actual_uid" != "$uid" ]; then
         failure <<EOF
 It seems the build user $username already exists, but with the UID
-with the UID '$actual_uid'. This script can't really handle that right
+'$actual_uid'. This script can't really handle that right
 now, so I'm going to give up.

 If you already created the users and you know they start from

View file

@@ -2,6 +2,9 @@

 set -eo pipefail

+# stock path to avoid unexpected command versions
+PATH="$(/usr/bin/getconf PATH)"
+
 ((NEW_NIX_FIRST_BUILD_UID=351))
 ((TEMP_NIX_FIRST_BUILD_UID=31000))

View file

@@ -450,7 +450,7 @@ ref<eval_cache::EvalCache> openEvalCache(
     std::shared_ptr<flake::LockedFlake> lockedFlake)
 {
     auto fingerprint = evalSettings.useEvalCache && evalSettings.pureEval
-        ? lockedFlake->getFingerprint(state.store)
+        ? lockedFlake->getFingerprint(state.store, state.fetchSettings)
         : std::nullopt;

     auto rootLoader = [&state, lockedFlake]()
     {

View file

@@ -76,10 +76,6 @@ mkMesonLibrary (finalAttrs: {
     (lib.mesonOption "readline-flavor" readlineFlavor)
   ];

-  env = lib.optionalAttrs (stdenv.isLinux && !(stdenv.hostPlatform.isStatic && stdenv.system == "aarch64-linux")) {
-    LDFLAGS = "-fuse-ld=gold";
-  };
-
   meta = {
     platforms = lib.platforms.unix ++ lib.platforms.windows;
   };

View file

@@ -1,5 +1,4 @@
 { lib
-, stdenv
 , mkMesonLibrary

 , nix-store-c
@@ -47,10 +46,6 @@ mkMesonLibrary (finalAttrs: {
   mesonFlags = [
   ];

-  env = lib.optionalAttrs (stdenv.isLinux && !(stdenv.hostPlatform.isStatic && stdenv.system == "aarch64-linux")) {
-    LDFLAGS = "-fuse-ld=gold";
-  };
-
   meta = {
     platforms = lib.platforms.unix ++ lib.platforms.windows;
   };

View file

@@ -1,5 +1,4 @@
 { lib
-, stdenv
 , mkMesonLibrary

 , nix-store-test-support
@@ -51,10 +50,6 @@ mkMesonLibrary (finalAttrs: {
   mesonFlags = [
   ];

-  env = lib.optionalAttrs (stdenv.isLinux && !(stdenv.hostPlatform.isStatic && stdenv.system == "aarch64-linux")) {
-    LDFLAGS = "-fuse-ld=gold";
-  };
-
   meta = {
     platforms = lib.platforms.unix ++ lib.platforms.windows;
   };

View file

@@ -56,10 +56,6 @@ mkMesonExecutable (finalAttrs: {
   mesonFlags = [
   ];

-  env = lib.optionalAttrs (stdenv.isLinux && !(stdenv.hostPlatform.isStatic && stdenv.system == "aarch64-linux")) {
-    LDFLAGS = "-fuse-ld=gold";
-  };
-
   passthru = {
     tests = {
       run = runCommand "${finalAttrs.pname}-run" {

View file

@@ -41,10 +41,17 @@ let
         (key: node:
           let

+            parentNode = allNodes.${getInputByPath lockFile.root node.parent};
+
             sourceInfo =
               if overrides ? ${key}
               then
                 overrides.${key}.sourceInfo
+              else if node.locked.type == "path" && builtins.substring 0 1 node.locked.path != "/"
+              then
+                parentNode.sourceInfo // {
+                  outPath = parentNode.outPath + ("/" + node.locked.path);
+                }
               else
                 # FIXME: remove obsolete node.info.
                 # Note: lock file entries are always final.

View file

@@ -406,7 +406,7 @@ void EvalState::checkURI(const std::string & uri)

     /* If the URI is a path, then check it against allowedPaths as
        well. */
-    if (hasPrefix(uri, "/")) {
+    if (isAbsolute(uri)) {
         if (auto rootFS2 = rootFS.dynamic_pointer_cast<AllowListSourceAccessor>())
             rootFS2->checkAccess(CanonPath(uri));
         return;

View file

@@ -96,8 +96,6 @@ mkMesonLibrary (finalAttrs: {
     # https://github.com/NixOS/nixpkgs/issues/86131.
     BOOST_INCLUDEDIR = "${lib.getDev boost}/include";
     BOOST_LIBRARYDIR = "${lib.getLib boost}/lib";
-  } // lib.optionalAttrs (stdenv.isLinux && !(stdenv.hostPlatform.isStatic && stdenv.system == "aarch64-linux")) {
-    LDFLAGS = "-fuse-ld=gold";
   };

   meta = {

View file

@@ -182,7 +182,7 @@ static void fetchTree(
     if (!state.settings.pureEval && !input.isDirect() && experimentalFeatureSettings.isEnabled(Xp::Flakes))
         input = lookupInRegistries(state.store, input).first;

-    if (state.settings.pureEval && !input.isLocked()) {
+    if (state.settings.pureEval && !input.isConsideredLocked(state.fetchSettings)) {
         auto fetcher = "fetchTree";
         if (params.isFetchGit)
             fetcher = "fetchGit";

View file

@@ -54,10 +54,6 @@ mkMesonExecutable (finalAttrs: {
   mesonFlags = [
   ];

-  env = lib.optionalAttrs (stdenv.isLinux && !(stdenv.hostPlatform.isStatic && stdenv.system == "aarch64-linux")) {
-    LDFLAGS = "-fuse-ld=gold";
-  };
-
   passthru = {
     tests = {
       run = runCommand "${finalAttrs.pname}-run" {

View file

@@ -70,6 +70,22 @@ struct Settings : public Config
     Setting<bool> warnDirty{this, true, "warn-dirty",
         "Whether to warn about dirty Git/Mercurial trees."};

+    Setting<bool> allowDirtyLocks{
+        this,
+        false,
+        "allow-dirty-locks",
+        R"(
+          Whether to allow dirty inputs (such as dirty Git workdirs)
+          to be locked via their NAR hash. This is generally bad
+          practice since Nix has no way to obtain such inputs if they
+          are subsequently modified. Therefore lock files with dirty
+          locks should generally only be used for local testing, and
+          should not be pushed to other users.
+        )",
+        {},
+        true,
+        Xp::Flakes};
+
     Setting<bool> trustTarballsFromGitForges{
         this, true, "trust-tarballs-from-git-forges",
         R"(

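A hedged sketch of exercising the new `allow-dirty-locks` setting from the command line (assuming a local flake with a dirty Git input, and that the setting can be passed like any other `nix.conf` setting via `--option`):

```console
$ nix flake lock --option allow-dirty-locks true
```

The resulting lock file pins the dirty input only by its NAR hash, so, as the setting description warns, it is really only suitable for local testing.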
View file

@@ -4,6 +4,7 @@
 #include "fetch-to-store.hh"
 #include "json-utils.hh"
 #include "store-path-accessor.hh"
+#include "fetch-settings.hh"

 #include <nlohmann/json.hpp>
@@ -154,11 +155,23 @@ bool Input::isLocked() const
     return scheme && scheme->isLocked(*this);
 }

+bool Input::isConsideredLocked(
+    const Settings & settings) const
+{
+    return isLocked() || (settings.allowDirtyLocks && getNarHash());
+}
+
 bool Input::isFinal() const
 {
     return maybeGetBoolAttr(attrs, "__final").value_or(false);
 }

+std::optional<std::string> Input::isRelative() const
+{
+    assert(scheme);
+    return scheme->isRelative(*this);
+}
+
 Attrs Input::toAttrs() const
 {
     return attrs;
@@ -345,7 +358,7 @@ void Input::clone(const Path & destDir) const
     scheme->clone(*this, destDir);
 }

-std::optional<Path> Input::getSourcePath() const
+std::optional<std::filesystem::path> Input::getSourcePath() const
 {
     assert(scheme);
     return scheme->getSourcePath(*this);
@@ -448,7 +461,7 @@ Input InputScheme::applyOverrides(
     return input;
 }

-std::optional<Path> InputScheme::getSourcePath(const Input & input) const
+std::optional<std::filesystem::path> InputScheme::getSourcePath(const Input & input) const
 {
     return {};
 }

View file

@@ -41,11 +41,6 @@ struct Input
     std::shared_ptr<InputScheme> scheme; // note: can be null
     Attrs attrs;

-    /**
-     * path of the parent of this input, used for relative path resolution
-     */
-    std::optional<Path> parent;
-
     /**
      * Cached result of getFingerprint().
      */
@@ -95,6 +90,21 @@ public:
      */
     bool isLocked() const;

+    /**
+     * Return whether the input is either locked, or, if
+     * `allow-dirty-locks` is enabled, it has a NAR hash. In the
+     * latter case, we can verify the input but we may not be able to
+     * fetch it from anywhere.
+     */
+    bool isConsideredLocked(
+        const Settings & settings) const;
+
+    /**
+     * Only for relative path flakes, i.e. 'path:./foo', returns the
+     * relative path, i.e. './foo'.
+     */
+    std::optional<std::string> isRelative() const;
+
     /**
      * Return whether this is a "final" input, meaning that fetching
      * it will not add, remove or change any attributes. (See
@@ -154,7 +164,7 @@ public:

     void clone(const Path & destDir) const;

-    std::optional<Path> getSourcePath() const;
+    std::optional<std::filesystem::path> getSourcePath() const;

     /**
      * Write a file to this input, for input types that support
@@ -237,7 +247,7 @@ struct InputScheme

     virtual void clone(const Input & input, const Path & destDir) const;

-    virtual std::optional<Path> getSourcePath(const Input & input) const;
+    virtual std::optional<std::filesystem::path> getSourcePath(const Input & input) const;

     virtual void putFile(
         const Input & input,
@@ -260,6 +270,9 @@ struct InputScheme

     virtual bool isLocked(const Input & input) const
     { return false; }
+
+    virtual std::optional<std::string> isRelative(const Input & input) const
+    { return std::nullopt; }
 };

 void registerInputScheme(std::shared_ptr<InputScheme> && fetcher);

View file

@@ -300,7 +300,7 @@ struct GitInputScheme : InputScheme

         Strings args = {"clone"};

-        args.push_back(repoInfo.url);
+        args.push_back(repoInfo.locationToArg());

         if (auto ref = input.getRef()) {
             args.push_back("--branch");
@@ -314,11 +314,9 @@ struct GitInputScheme : InputScheme
         runProgram("git", true, args, {}, true);
     }

-    std::optional<Path> getSourcePath(const Input & input) const override
+    std::optional<std::filesystem::path> getSourcePath(const Input & input) const override
     {
-        auto repoInfo = getRepoInfo(input);
-        if (repoInfo.isLocal) return repoInfo.url;
-        return std::nullopt;
+        return getRepoInfo(input).getPath();
     }

     void putFile(
@@ -328,14 +326,15 @@ struct GitInputScheme : InputScheme
         std::optional<std::string> commitMsg) const override
     {
         auto repoInfo = getRepoInfo(input);
-        if (!repoInfo.isLocal)
+        auto repoPath = repoInfo.getPath();
+        if (!repoPath)
             throw Error("cannot commit '%s' to Git repository '%s' because it's not a working tree", path, input.to_string());

-        writeFile((CanonPath(repoInfo.url) / path).abs(), contents);
+        writeFile(*repoPath / path.rel(), contents);

         auto result = runProgram(RunOptions {
             .program = "git",
-            .args = {"-C", repoInfo.url, "--git-dir", repoInfo.gitDir, "check-ignore", "--quiet", std::string(path.rel())},
+            .args = {"-C", repoPath->string(), "--git-dir", repoInfo.gitDir, "check-ignore", "--quiet", std::string(path.rel())},
         });
         auto exitCode =
 #ifndef WIN32 // TODO abstract over exit status handling on Windows
@@ -348,7 +347,7 @@ struct GitInputScheme : InputScheme
         if (exitCode != 0) {
             // The path is not `.gitignore`d, we can add the file.
             runProgram("git", true,
-                { "-C", repoInfo.url, "--git-dir", repoInfo.gitDir, "add", "--intent-to-add", "--", std::string(path.rel()) });
+                { "-C", repoPath->string(), "--git-dir", repoInfo.gitDir, "add", "--intent-to-add", "--", std::string(path.rel()) });

             if (commitMsg) {
@@ -356,7 +355,7 @@ struct GitInputScheme : InputScheme
                 logger->pause();
                 Finally restoreLogger([]() { logger->resume(); });
                 runProgram("git", true,
-                    { "-C", repoInfo.url, "--git-dir", repoInfo.gitDir, "commit", std::string(path.rel()), "-F", "-" },
+                    { "-C", repoPath->string(), "--git-dir", repoInfo.gitDir, "commit", std::string(path.rel()), "-F", "-" },
                     *commitMsg);
             }
         }
@@ -364,24 +363,41 @@ struct GitInputScheme : InputScheme

     struct RepoInfo
     {
-        /* Whether this is a local, non-bare repository. */
-        bool isLocal = false;
+        /* Either the path of the repo (for local, non-bare repos), or
+           the URL (which is never a `file` URL). */
+        std::variant<std::filesystem::path, ParsedURL> location;

         /* Working directory info: the complete list of files, and
            whether the working directory is dirty compared to HEAD. */
         GitRepo::WorkdirInfo workdirInfo;

-        /* URL of the repo, or its path if isLocal. Never a `file` URL. */
-        std::string url;
+        std::string locationToArg() const
+        {
+            return std::visit(
+                overloaded {
+                    [&](const std::filesystem::path & path)
+                    { return path.string(); },
+                    [&](const ParsedURL & url)
+                    { return url.to_string(); }
+                }, location);
+        }
+
+        std::optional<std::filesystem::path> getPath() const
+        {
+            if (auto path = std::get_if<std::filesystem::path>(&location))
+                return *path;
+            else
+                return std::nullopt;
+        }

         void warnDirty(const Settings & settings) const
         {
             if (workdirInfo.isDirty) {
                 if (!settings.allowDirty)
-                    throw Error("Git tree '%s' is dirty", url);
+                    throw Error("Git tree '%s' is dirty", locationToArg());

                 if (settings.warnDirty)
-                    warn("Git tree '%s' is dirty", url);
+                    warn("Git tree '%s' is dirty", locationToArg());
             }
         }
@@ -433,18 +449,36 @@ struct GitInputScheme : InputScheme
         static bool forceHttp = getEnv("_NIX_FORCE_HTTP") == "1"; // for testing

         auto url = parseURL(getStrAttr(input.attrs, "url"));

         bool isBareRepository = url.scheme == "file" && !pathExists(url.path + "/.git");
-        repoInfo.isLocal = url.scheme == "file" && !forceHttp && !isBareRepository;
-        repoInfo.url = repoInfo.isLocal ? url.path : url.to_string();
+        //
+        // FIXME: here we turn a possibly relative path into an absolute path.
+        // This allows relative git flake inputs to be resolved against the
+        // **current working directory** (as in POSIX), which tends to work out
+        // ok in the context of flakes, but is the wrong behavior,
+        // as it should resolve against the flake.nix base directory instead.
+        //
+        // See: https://discourse.nixos.org/t/57783 and #9708
+        //
+        if (url.scheme == "file" && !forceHttp && !isBareRepository) {
+            if (!isAbsolute(url.path)) {
+                warn(
+                    "Fetching Git repository '%s', which uses a path relative to the current directory. "
+                    "This is not supported and will stop working in a future release. "
+                    "See https://github.com/NixOS/nix/issues/12281 for details.",
+                    url);
+            }
+            repoInfo.location = std::filesystem::absolute(url.path);
+        } else
+            repoInfo.location = url;

         // If this is a local directory and no ref or revision is
         // given, then allow the use of an unclean working tree.
-        if (!input.getRef() && !input.getRev() && repoInfo.isLocal)
-            repoInfo.workdirInfo = GitRepo::getCachedWorkdirInfo(repoInfo.url);
+        if (auto repoPath = repoInfo.getPath(); !input.getRef() && !input.getRev() && repoPath)
+            repoInfo.workdirInfo = GitRepo::getCachedWorkdirInfo(*repoPath);

         return repoInfo;
     }

-    uint64_t getLastModified(const RepoInfo & repoInfo, const std::string & repoDir, const Hash & rev) const
+    uint64_t getLastModified(const RepoInfo & repoInfo, const std::filesystem::path & repoDir, const Hash & rev) const
     {
         Cache::Key key{"gitLastModified", {{"rev", rev.gitRev()}}};
@@ -460,7 +494,7 @@ struct GitInputScheme : InputScheme
         return lastModified;
     }

-    uint64_t getRevCount(const RepoInfo & repoInfo, const std::string & repoDir, const Hash & rev) const
+    uint64_t getRevCount(const RepoInfo & repoInfo, const std::filesystem::path & repoDir, const Hash & rev) const
     {
         Cache::Key key{"gitRevCount", {{"rev", rev.gitRev()}}};
@@ -469,7 +503,7 @@ struct GitInputScheme : InputScheme
         if (auto revCountAttrs = cache->lookup(key))
             return getIntAttr(*revCountAttrs, "revCount");

-        Activity act(*logger, lvlChatty, actUnknown, fmt("getting Git revision count of '%s'", repoInfo.url));
+        Activity act(*logger, lvlChatty, actUnknown, fmt("getting Git revision count of '%s'", repoInfo.locationToArg()));

         auto revCount = GitRepo::openRepo(repoDir)->getRevCount(rev);
@@ -480,11 +514,15 @@ struct GitInputScheme : InputScheme

     std::string getDefaultRef(const RepoInfo & repoInfo) const
     {
-        auto head = repoInfo.isLocal
-            ? GitRepo::openRepo(repoInfo.url)->getWorkdirRef()
-            : readHeadCached(repoInfo.url);
+        auto head = std::visit(
+            overloaded {
+                [&](const std::filesystem::path & path)
+                { return GitRepo::openRepo(path)->getWorkdirRef(); },
+                [&](const ParsedURL & url)
+                { return readHeadCached(url.to_string()); }
+            }, repoInfo.location);
         if (!head) {
-            warn("could not read HEAD ref from repo at '%s', using 'master'", repoInfo.url);
+            warn("could not read HEAD ref from repo at '%s', using 'master'", repoInfo.locationToArg());
             return "master";
         }
         return *head;
@@ -527,29 +565,30 @@ struct GitInputScheme : InputScheme
         auto ref = originalRef ? *originalRef : getDefaultRef(repoInfo);
         input.attrs.insert_or_assign("ref", ref);

-        Path repoDir;
+        std::filesystem::path repoDir;

-        if (repoInfo.isLocal) {
-            repoDir = repoInfo.url;
+        if (auto repoPath = repoInfo.getPath()) {
+            repoDir = *repoPath;
             if (!input.getRev())
                 input.attrs.insert_or_assign("rev", GitRepo::openRepo(repoDir)->resolveRef(ref).gitRev());
         } else {
-            Path cacheDir = getCachePath(repoInfo.url, getShallowAttr(input));
+            auto repoUrl = std::get<ParsedURL>(repoInfo.location);
+            std::filesystem::path cacheDir = getCachePath(repoUrl.to_string(), getShallowAttr(input));
             repoDir = cacheDir;
             repoInfo.gitDir = ".";

-            createDirs(dirOf(cacheDir));
-            PathLocks cacheDirLock({cacheDir});
+            std::filesystem::create_directories(cacheDir.parent_path());
+            PathLocks cacheDirLock({cacheDir.string()});

             auto repo = GitRepo::openRepo(cacheDir, true, true);

             // We need to set the origin so resolving submodule URLs works
-            repo->setRemote("origin", repoInfo.url);
+            repo->setRemote("origin", repoUrl.to_string());

-            Path localRefFile =
+            auto localRefFile =
                 ref.compare(0, 5, "refs/") == 0
-                ? cacheDir + "/" + ref
-                : cacheDir + "/refs/heads/" + ref;
+                ? cacheDir / ref
+                : cacheDir / "refs/heads" / ref;

             bool doFetch;
             time_t now = time(0);
@@ -565,7 +604,7 @@ struct GitInputScheme : InputScheme
                 /* If the local ref is older than tarball-ttl seconds, do a
                    git fetch to update the local ref to the remote ref. */
                 struct stat st;
-                doFetch = stat(localRefFile.c_str(), &st) != 0 ||
+                doFetch = stat(localRefFile.string().c_str(), &st) != 0 ||
                     !isCacheFileWithinTtl(now, st);
             }
         }
@@ -583,11 +622,11 @@ struct GitInputScheme : InputScheme
                         ? ref
                         : "refs/heads/" + ref;

-                    repo->fetch(repoInfo.url, fmt("%s:%s", fetchRef, fetchRef), getShallowAttr(input));
+                    repo->fetch(repoUrl.to_string(), fmt("%s:%s", fetchRef, fetchRef), getShallowAttr(input));
                 } catch (Error & e) {
-                    if (!pathExists(localRefFile)) throw;
+                    if (!std::filesystem::exists(localRefFile)) throw;
                     logError(e.info());
-                    warn("could not update local clone of Git repository '%s'; continuing with the most recent version", repoInfo.url);
+                    warn("could not update local clone of Git repository '%s'; continuing with the most recent version", repoInfo.locationToArg());
                 }

                 try {
@@ -596,8 +635,8 @@ struct GitInputScheme : InputScheme
                 } catch (Error & e) {
                     warn("could not update mtime for file '%s': %s", localRefFile, e.info().msg);
                 }
-                if (!originalRef && !storeCachedHead(repoInfo.url, ref))
-                    warn("could not update cached head '%s' for '%s'", ref, repoInfo.url);
+                if (!originalRef && !storeCachedHead(repoUrl.to_string(), ref))
+                    warn("could not update cached head '%s' for '%s'", ref, repoInfo.locationToArg());
             }

             if (auto rev = input.getRev()) {
@@ -609,8 +648,7 @@ struct GitInputScheme : InputScheme
                         "allRefs = true;" ANSI_NORMAL " to " ANSI_BOLD "fetchGit" ANSI_NORMAL ".",
                         rev->gitRev(),
                         ref,
-                        repoInfo.url
-                    );
+                        repoInfo.locationToArg());
             } else
                 input.attrs.insert_or_assign("rev", repo->resolveRef(ref).gitRev());
@@ -622,7 +660,7 @@ struct GitInputScheme : InputScheme
         auto isShallow = repo->isShallow();

         if (isShallow && !getShallowAttr(input))
-            throw Error("'%s' is a shallow Git repository, but shallow repositories are only allowed when `shallow = true;` is specified", repoInfo.url);
+            throw Error("'%s' is a shallow Git repository, but shallow repositories are only allowed when `shallow = true;` is specified", repoInfo.locationToArg());

         // FIXME: check whether rev is an ancestor of ref?
@@ -637,7 +675,7 @@ struct GitInputScheme : InputScheme
             infoAttrs.insert_or_assign("revCount",
                 getRevCount(repoInfo, repoDir, rev));

-        printTalkative("using revision %s of repo '%s'", rev.gitRev(), repoInfo.url);
+        printTalkative("using revision %s of repo '%s'", rev.gitRev(), repoInfo.locationToArg());

         verifyCommit(input, repo);
@@ -693,21 +731,23 @@ struct GitInputScheme : InputScheme
         RepoInfo & repoInfo,
         Input && input) const
     {
+        auto repoPath = repoInfo.getPath().value();
+
         if (getSubmodulesAttr(input))
             /* Create mountpoints for the submodules. */
             for (auto & submodule : repoInfo.workdirInfo.submodules)
                 repoInfo.workdirInfo.files.insert(submodule.path);

-        auto repo = GitRepo::openRepo(repoInfo.url, false, false);
+        auto repo = GitRepo::openRepo(repoPath, false, false);

         auto exportIgnore = getExportIgnoreAttr(input);

         ref<SourceAccessor> accessor =
             repo->getAccessor(repoInfo.workdirInfo,
                 exportIgnore,
-                makeNotAllowedError(repoInfo.url));
+                makeNotAllowedError(repoInfo.locationToArg()));

-        accessor->setPathDisplay(repoInfo.url);
+        accessor->setPathDisplay(repoInfo.locationToArg());

         /* If the repo has submodules, return a mounted input accessor
            consisting of the accessor for the top-level repo and the
@@ -716,10 +756,10 @@ struct GitInputScheme : InputScheme
             std::map<CanonPath, nix::ref<SourceAccessor>> mounts;

             for (auto & submodule : repoInfo.workdirInfo.submodules) {
-                auto submodulePath = CanonPath(repoInfo.url) / submodule.path;
+                auto submodulePath = repoPath / submodule.path.rel();
                 fetchers::Attrs attrs;
                 attrs.insert_or_assign("type", "git");
-                attrs.insert_or_assign("url", submodulePath.abs());
+                attrs.insert_or_assign("url", submodulePath.string());
                 attrs.insert_or_assign("exportIgnore", Explicit<bool>{ exportIgnore });
                 attrs.insert_or_assign("submodules", Explicit<bool>{ true });
                 // TODO: fall back to getAccessorFromCommit-like fetch when submodules aren't checked out
@@ -743,7 +783,7 @@ struct GitInputScheme : InputScheme
         }

         if (!repoInfo.workdirInfo.isDirty) {
-            auto repo = GitRepo::openRepo(repoInfo.url);
+            auto repo = GitRepo::openRepo(repoPath);

             if (auto ref = repo->getWorkdirRef())
                 input.attrs.insert_or_assign("ref", *ref);
@@ -753,7 +793,7 @@ struct GitInputScheme : InputScheme
             input.attrs.insert_or_assign("rev", rev.gitRev());
             input.attrs.insert_or_assign("revCount",
-                rev == nullRev ? 0 : getRevCount(repoInfo, repoInfo.url, rev));
+                rev == nullRev ? 0 : getRevCount(repoInfo, repoPath, rev));

             verifyCommit(input, repo);
         } else {
@@ -772,7 +812,7 @@ struct GitInputScheme : InputScheme
             input.attrs.insert_or_assign(
                 "lastModified",
                 repoInfo.workdirInfo.headRev
-                    ? getLastModified(repoInfo, repoInfo.url, *repoInfo.workdirInfo.headRev)
+                    ? getLastModified(repoInfo, repoPath, *repoInfo.workdirInfo.headRev)
                     : 0);

         return {accessor, std::move(input)};
@@ -795,7 +835,7 @@ struct GitInputScheme : InputScheme
         }

         auto [accessor, final] =
-            input.getRef() || input.getRev() || !repoInfo.isLocal
+            input.getRef() || input.getRev() || !repoInfo.getPath()
             ? getAccessorFromCommit(store, repoInfo, std::move(input))
             : getAccessorFromWorkdir(store, repoInfo, std::move(input));
@@ -813,14 +853,14 @@ struct GitInputScheme : InputScheme
             return makeFingerprint(*rev);
         else {
             auto repoInfo = getRepoInfo(input);
-            if (repoInfo.isLocal && repoInfo.workdirInfo.headRev && repoInfo.workdirInfo.submodules.empty()) {
+            if (auto repoPath = repoInfo.getPath(); repoPath && repoInfo.workdirInfo.headRev && repoInfo.workdirInfo.submodules.empty()) {
                 /* Calculate a fingerprint that takes into account the
                    deleted and modified/added files. */
                 HashSink hashSink{HashAlgorithm::SHA512};
                 for (auto & file : repoInfo.workdirInfo.dirtyFiles) {
                     writeString("modified:", hashSink);
                     writeString(file.abs(), hashSink);
-                    dumpPath(repoInfo.url + "/" + file.abs(), hashSink);
+                    dumpPath((*repoPath / file.rel()).string(), hashSink);
                 }
                 for (auto & file : repoInfo.workdirInfo.deletedFiles) {
                     writeString("deleted:", hashSink);

View file

@@ -126,7 +126,7 @@ struct MercurialInputScheme : InputScheme
         return res;
     }

-    std::optional<Path> getSourcePath(const Input & input) const override
+    std::optional<std::filesystem::path> getSourcePath(const Input & input) const override
     {
         auto url = parseURL(getStrAttr(input.attrs, "url"));
         if (url.scheme == "file" && !input.getRef() && !input.getRev())

View file

@@ -1,5 +1,4 @@
 { lib
-, stdenv
 , mkMesonLibrary

 , nix-util
@@ -51,10 +50,6 @@ mkMesonLibrary (finalAttrs: {
     echo ${version} > ../../.version
   '';

-  env = lib.optionalAttrs (stdenv.isLinux && !(stdenv.hostPlatform.isStatic && stdenv.system == "aarch64-linux")) {
-    LDFLAGS = "-fuse-ld=gold";
-  };
-
   meta = {
     platforms = lib.platforms.unix ++ lib.platforms.windows;
   };

View file

@@ -80,9 +80,9 @@ struct PathInputScheme : InputScheme
         };
     }

-    std::optional<Path> getSourcePath(const Input & input) const override
+    std::optional<std::filesystem::path> getSourcePath(const Input & input) const override
     {
-        return getStrAttr(input.attrs, "path");
+        return getAbsPath(input);
     }

     void putFile(
@@ -91,13 +91,13 @@ struct PathInputScheme : InputScheme
         std::string_view contents,
         std::optional<std::string> commitMsg) const override
     {
-        writeFile((CanonPath(getAbsPath(input)) / path).abs(), contents);
+        writeFile(getAbsPath(input) / path.rel(), contents);
     }

-    std::optional<std::string> isRelative(const Input & input) const
+    std::optional<std::string> isRelative(const Input & input) const override
     {
         auto path = getStrAttr(input.attrs, "path");
-        if (hasPrefix(path, "/"))
+        if (isAbsolute(path))
             return std::nullopt;
         else
             return path;
@@ -108,12 +108,12 @@ struct PathInputScheme : InputScheme
         return (bool) input.getNarHash();
     }

-    CanonPath getAbsPath(const Input & input) const
+    std::filesystem::path getAbsPath(const Input & input) const
     {
         auto path = getStrAttr(input.attrs, "path");

-        if (path[0] == '/')
-            return CanonPath(path);
+        if (isAbsolute(path))
+            return canonPath(path);

         throw Error("cannot fetch input '%s' because it uses a relative path", input.to_string());
     }
@@ -121,31 +121,14 @@ struct PathInputScheme : InputScheme
     std::pair<ref<SourceAccessor>, Input> getAccessor(ref<Store> store, const Input & _input) const override
     {
         Input input(_input);
-        std::string absPath;
         auto path = getStrAttr(input.attrs, "path");
-
-        if (path[0] != '/') {
-            if (!input.parent)
-                throw Error("cannot fetch input '%s' because it uses a relative path", input.to_string());
-
-            auto parent = canonPath(*input.parent);
-
-            // the path isn't relative, prefix it
-            absPath = nix::absPath(path, parent);
-
-            // for security, ensure that if the parent is a store path, it's inside it
-            if (store->isInStore(parent)) {
-                auto storePath = store->printStorePath(store->toStorePath(parent).first);
-                if (!isDirOrInDir(absPath, storePath))
-                    throw BadStorePath("relative path '%s' points outside of its parent's store path '%s'", path, storePath);
-            }
-        } else
-            absPath = path;
+        auto absPath = getAbsPath(input);

-        Activity act(*logger, lvlTalkative, actUnknown, fmt("copying '%s'", absPath));
+        Activity act(*logger, lvlTalkative, actUnknown, fmt("copying '%s' to the store", absPath));

         // FIXME: check whether access to 'path' is allowed.
-        auto storePath = store->maybeParseStorePath(absPath);
+        auto storePath = store->maybeParseStorePath(absPath.string());

         if (storePath)
             store->addTempRoot(*storePath);
@@ -154,7 +137,7 @@ struct PathInputScheme : InputScheme
         if (!storePath || storePath->name() != "source" || !store->isValidPath(*storePath)) {
             // FIXME: try to substitute storePath.
             auto src = sinkToSource([&](Sink & sink) {
                 mtime = dumpPathAndGetMtime(absPath.string(), sink, defaultPathFilter);
             });
             storePath = store->addToStoreFromDump(*src, "source");
         }
@@ -176,7 +159,7 @@ struct PathInputScheme : InputScheme
            store object and the subpath. */
         auto path = getAbsPath(input);
         try {
-            auto [storePath, subPath] = store->toStorePath(path.abs());
+            auto [storePath, subPath] = store->toStorePath(path.string());
             auto info = store->queryPathInfo(storePath);
             return fmt("path:%s:%s", info->narHash.to_string(HashFormat::Base16, false), subPath);
         } catch (Error &) {

View file

@@ -153,7 +153,7 @@ static std::shared_ptr<Registry> getGlobalRegistry(const Settings & settings, re
             return std::make_shared<Registry>(settings, Registry::Global); // empty registry
         }

-        if (!hasPrefix(path, "/")) {
+        if (!isAbsolute(path)) {
             auto storePath = downloadFile(store, path, "flake-registry.json").storePath;
             if (auto store2 = store.dynamic_pointer_cast<LocalFSStore>())
                 store2->addPermRoot(storePath, getCacheDir() + "/flake-registry.json");

View file

@@ -1,5 +1,4 @@
 { lib
-, stdenv
 , mkMesonLibrary

 , nix-store-c
@@ -49,10 +48,6 @@ mkMesonLibrary (finalAttrs: {
   mesonFlags = [
   ];

-  env = lib.optionalAttrs (stdenv.isLinux && !(stdenv.hostPlatform.isStatic && stdenv.system == "aarch64-linux")) {
-    LDFLAGS = "-fuse-ld=gold";
-  };
-
   meta = {
     platforms = lib.platforms.unix ++ lib.platforms.windows;
   };

View file

@@ -56,10 +56,6 @@ mkMesonExecutable (finalAttrs: {
   mesonFlags = [
   ];

-  env = lib.optionalAttrs (stdenv.isLinux && !(stdenv.hostPlatform.isStatic && stdenv.system == "aarch64-linux")) {
-    LDFLAGS = "-fuse-ld=gold";
-  };
-
   passthru = {
     tests = {
       run = runCommand "${finalAttrs.pname}-run" {

View file

@@ -43,7 +43,7 @@ static std::optional<FetchedFlake> lookupInFlakeCache(
 static std::tuple<StorePath, FlakeRef, FlakeRef> fetchOrSubstituteTree(
     EvalState & state,
     const FlakeRef & originalRef,
-    bool allowLookup,
+    bool useRegistries,
     FlakeCache & flakeCache)
 {
     auto fetched = lookupInFlakeCache(flakeCache, originalRef);
@@ -54,7 +54,7 @@ static std::tuple<StorePath, FlakeRef, FlakeRef> fetchOrSubstituteTree(
             auto [storePath, lockedRef] = originalRef.fetchTree(state.store);
             fetched.emplace(FetchedFlake{.lockedRef = lockedRef, .storePath = storePath});
         } else {
-            if (allowLookup) {
+            if (useRegistries) {
                 resolvedRef = originalRef.resolve(
                     state.store,
                     [](fetchers::Registry::RegistryType type) {
@@ -102,12 +102,19 @@ static void expectType(EvalState & state, ValueType type,
 }

 static std::map<FlakeId, FlakeInput> parseFlakeInputs(
-    EvalState & state, Value * value, const PosIdx pos,
-    const std::optional<Path> & baseDir, InputPath lockRootPath);
+    EvalState & state,
+    Value * value,
+    const PosIdx pos,
+    const InputPath & lockRootPath,
+    const SourcePath & flakeDir);

-static FlakeInput parseFlakeInput(EvalState & state,
-    std::string_view inputName, Value * value, const PosIdx pos,
-    const std::optional<Path> & baseDir, InputPath lockRootPath)
+static FlakeInput parseFlakeInput(
+    EvalState & state,
+    std::string_view inputName,
+    Value * value,
+    const PosIdx pos,
+    const InputPath & lockRootPath,
+    const SourcePath & flakeDir)
 {
     expectType(state, nAttrs, *value, pos);
@@ -124,14 +131,25 @@ static FlakeInput parseFlakeInput(EvalState & state,
     for (auto & attr : *value->attrs()) {
         try {
             if (attr.name == sUrl) {
-                expectType(state, nString, *attr.value, attr.pos);
+                forceTrivialValue(state, *attr.value, pos);
+                if (attr.value->type() == nString)
                     url = attr.value->string_view();
+                else if (attr.value->type() == nPath) {
+                    auto path = attr.value->path();
+                    if (path.accessor != flakeDir.accessor)
+                        throw Error("input path '%s' at %s must be in the same source tree as %s",
+                            path, state.positions[attr.pos], flakeDir);
+                    url = "path:" + flakeDir.path.makeRelative(path.path);
+                }
+                else
+                    throw Error("expected a string or a path but got %s at %s",
+                        showType(attr.value->type()), state.positions[attr.pos]);
                 attrs.emplace("url", *url);
             } else if (attr.name == sFlake) {
                 expectType(state, nBool, *attr.value, attr.pos);
                 input.isFlake = attr.value->boolean();
             } else if (attr.name == sInputs) {
-                input.overrides = parseFlakeInputs(state, attr.value, attr.pos, baseDir, lockRootPath);
+                input.overrides = parseFlakeInputs(state, attr.value, attr.pos, lockRootPath, flakeDir);
             } else if (attr.name == sFollows) {
                 expectType(state, nString, *attr.value, attr.pos);
                 auto follows(parseInputPath(attr.value->c_str()));
@@ -189,7 +207,7 @@ static FlakeInput parseFlakeInput(EvalState & state,
         if (!attrs.empty())
             throw Error("unexpected flake input attribute '%s', at %s", attrs.begin()->first, state.positions[pos]);
         if (url)
-            input.ref = parseFlakeRef(state.fetchSettings, *url, baseDir, true, input.isFlake);
+            input.ref = parseFlakeRef(state.fetchSettings, *url, {}, true, input.isFlake, true);
     }

     if (!input.follows && !input.ref)
@@ -199,8 +217,11 @@ static FlakeInput parseFlakeInput(EvalState & state,
 }

 static std::map<FlakeId, FlakeInput> parseFlakeInputs(
-    EvalState & state, Value * value, const PosIdx pos,
-    const std::optional<Path> & baseDir, InputPath lockRootPath)
+    EvalState & state,
+    Value * value,
+    const PosIdx pos,
+    const InputPath & lockRootPath,
+    const SourcePath & flakeDir)
 {
     std::map<FlakeId, FlakeInput> inputs;
@@ -212,8 +233,8 @@ static std::map<FlakeId, FlakeInput> parseFlakeInputs(
                 state.symbols[inputAttr.name],
                 inputAttr.value,
                 inputAttr.pos,
-                baseDir,
-                lockRootPath));
+                lockRootPath,
+                flakeDir));
     }

     return inputs;
@@ -227,7 +248,8 @@ static Flake readFlake(
     const SourcePath & rootDir,
     const InputPath & lockRootPath)
 {
-    auto flakePath = rootDir / CanonPath(resolvedRef.subdir) / "flake.nix";
+    auto flakeDir = rootDir / CanonPath(resolvedRef.subdir);
+    auto flakePath = flakeDir / "flake.nix";

     // NOTE evalFile forces vInfo to be an attrset because mustBeTrivial is true.
     Value vInfo;
@@ -248,7 +270,7 @@ static Flake readFlake(
     auto sInputs = state.symbols.create("inputs");

     if (auto inputs = vInfo.attrs()->get(sInputs))
-        flake.inputs = parseFlakeInputs(state, inputs->value, inputs->pos, flakePath.parent().path.abs(), lockRootPath); // FIXME
+        flake.inputs = parseFlakeInputs(state, inputs->value, inputs->pos, lockRootPath, flakeDir);

     auto sOutputs = state.symbols.create("outputs");
@@ -323,25 +345,20 @@ static Flake readFlake(
 static Flake getFlake(
     EvalState & state,
     const FlakeRef & originalRef,
-    bool allowLookup,
+    bool useRegistries,
     FlakeCache & flakeCache,
-    InputPath lockRootPath)
+    const InputPath & lockRootPath)
 {
     auto [storePath, resolvedRef, lockedRef] = fetchOrSubstituteTree(
-        state, originalRef, allowLookup, flakeCache);
+        state, originalRef, useRegistries, flakeCache);

     return readFlake(state, originalRef, resolvedRef, lockedRef, state.rootPath(state.store->toRealPath(storePath)), lockRootPath);
 }

-Flake getFlake(EvalState & state, const FlakeRef & originalRef, bool allowLookup, FlakeCache & flakeCache)
-{
-    return getFlake(state, originalRef, allowLookup, flakeCache, {});
-}
-
-Flake getFlake(EvalState & state, const FlakeRef & originalRef, bool allowLookup)
+Flake getFlake(EvalState & state, const FlakeRef & originalRef, bool useRegistries)
 {
     FlakeCache flakeCache;
-    return getFlake(state, originalRef, allowLookup, flakeCache);
+    return getFlake(state, originalRef, useRegistries, flakeCache, {});
 }

 static LockFile readLockFile(
@@ -367,7 +384,7 @@ LockedFlake lockFlake(

     auto useRegistries = lockFlags.useRegistries.value_or(settings.useRegistries);

-    auto flake = getFlake(state, topRef, useRegistries, flakeCache);
+    auto flake = getFlake(state, topRef, useRegistries, flakeCache, {});

     if (lockFlags.applyNixConfig) {
         flake.config.apply(settings);
@@ -386,13 +403,29 @@ LockedFlake lockFlake(

     debug("old lock file: %s", oldLockFile);

-    std::map<InputPath, FlakeInput> overrides;
+    struct OverrideTarget
+    {
+        FlakeInput input;
+        SourcePath sourcePath;
+        std::optional<InputPath> parentInputPath; // FIXME: rename to inputPathPrefix?
+    };
+
+    std::map<InputPath, OverrideTarget> overrides;
     std::set<InputPath> explicitCliOverrides;
     std::set<InputPath> overridesUsed, updatesUsed;
     std::map<ref<Node>, SourcePath> nodePaths;

     for (auto & i : lockFlags.inputOverrides) {
-        overrides.insert_or_assign(i.first, FlakeInput { .ref = i.second });
+        overrides.emplace(
+            i.first,
+            OverrideTarget {
+                .input = FlakeInput { .ref = i.second },
+                /* Note: any relative overrides
+                   (e.g. `--override-input B/C "path:./foo/bar"`)
+                   are interpreted relative to the top-level
+                   flake. */
+                .sourcePath = flake.path,
+            });
         explicitCliOverrides.insert(i.first);
     }
@@ -405,8 +438,8 @@ LockedFlake lockFlake(
         ref<Node> node,
         const InputPath & inputPathPrefix,
         std::shared_ptr<const Node> oldNode,
-        const InputPath & lockRootPath,
+        const InputPath & followsPrefix,
const Path & parentPath, const SourcePath & sourcePath,
bool trustLock)> bool trustLock)>
computeLocks; computeLocks;
@ -421,8 +454,13 @@ LockedFlake lockFlake(
/* The old node, if any, from which locks can be /* The old node, if any, from which locks can be
copied. */ copied. */
std::shared_ptr<const Node> oldNode, std::shared_ptr<const Node> oldNode,
const InputPath & lockRootPath, /* The prefix relative to which 'follows' should be
const Path & parentPath, interpreted. When a node is initially locked, it's
relative to the node's flake; when it's already locked,
it's relative to the root of the lock file. */
const InputPath & followsPrefix,
/* The source path of this node's flake. */
const SourcePath & sourcePath,
bool trustLock) bool trustLock)
{ {
debug("computing lock file node '%s'", printInputPath(inputPathPrefix)); debug("computing lock file node '%s'", printInputPath(inputPathPrefix));
@ -434,7 +472,12 @@ LockedFlake lockFlake(
auto inputPath(inputPathPrefix); auto inputPath(inputPathPrefix);
inputPath.push_back(id); inputPath.push_back(id);
inputPath.push_back(idOverride); inputPath.push_back(idOverride);
overrides.insert_or_assign(inputPath, inputOverride); overrides.emplace(inputPath,
OverrideTarget {
.input = inputOverride,
.sourcePath = sourcePath,
.parentInputPath = inputPathPrefix
});
} }
} }
@ -466,13 +509,18 @@ LockedFlake lockFlake(
auto i = overrides.find(inputPath); auto i = overrides.find(inputPath);
bool hasOverride = i != overrides.end(); bool hasOverride = i != overrides.end();
bool hasCliOverride = explicitCliOverrides.contains(inputPath); bool hasCliOverride = explicitCliOverrides.contains(inputPath);
if (hasOverride) { if (hasOverride)
overridesUsed.insert(inputPath); overridesUsed.insert(inputPath);
// Respect the “flakeness” of the input even if we auto input = hasOverride ? i->second.input : input2;
// override it
i->second.isFlake = input2.isFlake; /* Resolve relative 'path:' inputs relative to
} the source path of the overrider. */
auto & input = hasOverride ? i->second : input2; auto overridenSourcePath = hasOverride ? i->second.sourcePath : sourcePath;
/* Respect the "flakeness" of the input even if we
override it. */
if (hasOverride)
input.isFlake = input2.isFlake;
/* Resolve 'follows' later (since it may refer to an input /* Resolve 'follows' later (since it may refer to an input
path we haven't processed yet. */ path we haven't processed yet. */
@ -488,6 +536,33 @@ LockedFlake lockFlake(
assert(input.ref); assert(input.ref);
auto overridenParentPath =
input.ref->input.isRelative()
? std::optional<InputPath>(hasOverride ? i->second.parentInputPath : inputPathPrefix)
: std::nullopt;
auto resolveRelativePath = [&]() -> std::optional<SourcePath>
{
if (auto relativePath = input.ref->input.isRelative()) {
return SourcePath {
overridenSourcePath.accessor,
CanonPath(*relativePath, overridenSourcePath.path.parent().value())
};
} else
return std::nullopt;
};
/* Get the input flake, resolve 'path:./...'
flakerefs relative to the parent flake. */
auto getInputFlake = [&]()
{
if (auto resolvedPath = resolveRelativePath()) {
return readFlake(state, *input.ref, *input.ref, *input.ref, *resolvedPath, inputPath);
} else {
return getFlake(state, *input.ref, useRegistries, flakeCache, inputPath);
}
};
/* Do we have an entry in the existing lock file? /* Do we have an entry in the existing lock file?
And the input is not in updateInputs? */ And the input is not in updateInputs? */
std::shared_ptr<LockedNode> oldLock; std::shared_ptr<LockedNode> oldLock;
@ -501,6 +576,7 @@ LockedFlake lockFlake(
if (oldLock if (oldLock
&& oldLock->originalRef == *input.ref && oldLock->originalRef == *input.ref
&& oldLock->parentPath == overridenParentPath
&& !hasCliOverride) && !hasCliOverride)
{ {
debug("keeping existing input '%s'", inputPathS); debug("keeping existing input '%s'", inputPathS);
@ -509,7 +585,10 @@ LockedFlake lockFlake(
didn't change and there is no override from a didn't change and there is no override from a
higher level flake. */ higher level flake. */
auto childNode = make_ref<LockedNode>( auto childNode = make_ref<LockedNode>(
oldLock->lockedRef, oldLock->originalRef, oldLock->isFlake); oldLock->lockedRef,
oldLock->originalRef,
oldLock->isFlake,
oldLock->parentPath);
node->inputs.insert_or_assign(id, childNode); node->inputs.insert_or_assign(id, childNode);
@ -551,7 +630,7 @@ LockedFlake lockFlake(
break; break;
} }
} }
auto absoluteFollows(lockRootPath); auto absoluteFollows(followsPrefix);
absoluteFollows.insert(absoluteFollows.end(), follows->begin(), follows->end()); absoluteFollows.insert(absoluteFollows.end(), follows->begin(), follows->end());
fakeInputs.emplace(i.first, FlakeInput { fakeInputs.emplace(i.first, FlakeInput {
.follows = absoluteFollows, .follows = absoluteFollows,
@ -561,11 +640,12 @@ LockedFlake lockFlake(
} }
if (mustRefetch) { if (mustRefetch) {
auto inputFlake = getFlake(state, oldLock->lockedRef, false, flakeCache, inputPath); auto inputFlake = getInputFlake();
nodePaths.emplace(childNode, inputFlake.path.parent()); nodePaths.emplace(childNode, inputFlake.path.parent());
computeLocks(inputFlake.inputs, childNode, inputPath, oldLock, lockRootPath, parentPath, false); computeLocks(inputFlake.inputs, childNode, inputPath, oldLock, followsPrefix,
inputFlake.path, false);
} else { } else {
computeLocks(fakeInputs, childNode, inputPath, oldLock, lockRootPath, parentPath, true); computeLocks(fakeInputs, childNode, inputPath, oldLock, followsPrefix, sourcePath, true);
} }
} else { } else {
@ -573,7 +653,9 @@ LockedFlake lockFlake(
this input. */ this input. */
debug("creating new input '%s'", inputPathS); debug("creating new input '%s'", inputPathS);
if (!lockFlags.allowUnlocked && !input.ref->input.isLocked()) if (!lockFlags.allowUnlocked
&& !input.ref->input.isLocked()
&& !input.ref->input.isRelative())
throw Error("cannot update unlocked flake input '%s' in pure mode", inputPathS); throw Error("cannot update unlocked flake input '%s' in pure mode", inputPathS);
/* Note: in case of an --override-input, we use /* Note: in case of an --override-input, we use
@ -586,17 +668,13 @@ LockedFlake lockFlake(
auto ref = (input2.ref && explicitCliOverrides.contains(inputPath)) ? *input2.ref : *input.ref; auto ref = (input2.ref && explicitCliOverrides.contains(inputPath)) ? *input2.ref : *input.ref;
if (input.isFlake) { if (input.isFlake) {
Path localPath = parentPath; auto inputFlake = getInputFlake();
FlakeRef localRef = *input.ref;
// If this input is a path, recurse it down. auto childNode = make_ref<LockedNode>(
// This allows us to resolve path inputs relative to the current flake. inputFlake.lockedRef,
if (localRef.input.getType() == "path") ref,
localPath = absPath(*input.ref->input.getSourcePath(), parentPath); true,
overridenParentPath);
auto inputFlake = getFlake(state, localRef, useRegistries, flakeCache, inputPath);
auto childNode = make_ref<LockedNode>(inputFlake.lockedRef, ref);
node->inputs.insert_or_assign(id, childNode); node->inputs.insert_or_assign(id, childNode);
@ -617,18 +695,27 @@ LockedFlake lockFlake(
oldLock oldLock
? std::dynamic_pointer_cast<const Node>(oldLock) ? std::dynamic_pointer_cast<const Node>(oldLock)
: readLockFile(state.fetchSettings, inputFlake.lockFilePath()).root.get_ptr(), : readLockFile(state.fetchSettings, inputFlake.lockFilePath()).root.get_ptr(),
oldLock ? lockRootPath : inputPath, oldLock ? followsPrefix : inputPath,
localPath, inputFlake.path,
false); false);
} }
else { else {
auto [path, lockedRef] = [&]() -> std::tuple<SourcePath, FlakeRef>
{
// Handle non-flake 'path:./...' inputs.
if (auto resolvedPath = resolveRelativePath()) {
return {*resolvedPath, *input.ref};
} else {
auto [storePath, resolvedRef, lockedRef] = fetchOrSubstituteTree( auto [storePath, resolvedRef, lockedRef] = fetchOrSubstituteTree(
state, *input.ref, useRegistries, flakeCache); state, *input.ref, useRegistries, flakeCache);
return {state.rootPath(state.store->toRealPath(storePath)), lockedRef};
}
}();
auto childNode = make_ref<LockedNode>(lockedRef, ref, false); auto childNode = make_ref<LockedNode>(lockedRef, ref, false, overridenParentPath);
nodePaths.emplace(childNode, state.rootPath(state.store->toRealPath(storePath))); nodePaths.emplace(childNode, path);
node->inputs.insert_or_assign(id, childNode); node->inputs.insert_or_assign(id, childNode);
} }
@ -641,9 +728,6 @@ LockedFlake lockFlake(
} }
}; };
// Bring in the current ref for relative path resolution if we have it
auto parentPath = flake.path.parent().path.abs();
nodePaths.emplace(newLockFile.root, flake.path.parent()); nodePaths.emplace(newLockFile.root, flake.path.parent());
computeLocks( computeLocks(
@ -652,7 +736,7 @@ LockedFlake lockFlake(
{}, {},
lockFlags.recreateLockFile ? nullptr : oldLockFile.root.get_ptr(), lockFlags.recreateLockFile ? nullptr : oldLockFile.root.get_ptr(),
{}, {},
parentPath, flake.path,
false); false);
for (auto & i : lockFlags.inputOverrides) for (auto & i : lockFlags.inputOverrides)
@ -678,9 +762,11 @@ LockedFlake lockFlake(
if (lockFlags.writeLockFile) { if (lockFlags.writeLockFile) {
if (sourcePath || lockFlags.outputLockFilePath) { if (sourcePath || lockFlags.outputLockFilePath) {
if (auto unlockedInput = newLockFile.isUnlocked()) { if (auto unlockedInput = newLockFile.isUnlocked(state.fetchSettings)) {
if (lockFlags.failOnUnlocked) if (lockFlags.failOnUnlocked)
throw Error("cannot write lock file of flake '%s' because it has an unlocked input ('%s').\n", topRef, *unlockedInput); throw Error(
"Will not write lock file of flake '%s' because it has an unlocked input ('%s'). "
"Use '--allow-dirty-locks' to allow this anyway.", topRef, *unlockedInput);
if (state.fetchSettings.warnDirty) if (state.fetchSettings.warnDirty)
warn("will not write lock file of flake '%s' because it has an unlocked input ('%s')", topRef, *unlockedInput); warn("will not write lock file of flake '%s' because it has an unlocked input ('%s')", topRef, *unlockedInput);
} else { } else {
@ -695,9 +781,9 @@ LockedFlake lockFlake(
writeFile(*lockFlags.outputLockFilePath, newLockFileS); writeFile(*lockFlags.outputLockFilePath, newLockFileS);
} else { } else {
auto relPath = (topRef.subdir == "" ? "" : topRef.subdir + "/") + "flake.lock"; auto relPath = (topRef.subdir == "" ? "" : topRef.subdir + "/") + "flake.lock";
auto outputLockFilePath = *sourcePath + "/" + relPath; auto outputLockFilePath = *sourcePath / relPath;
bool lockFileExists = pathExists(outputLockFilePath); bool lockFileExists = fs::symlink_exists(outputLockFilePath);
auto s = chomp(diff); auto s = chomp(diff);
if (lockFileExists) { if (lockFileExists) {
@ -733,8 +819,7 @@ LockedFlake lockFlake(
repo, so we should re-read it. FIXME: we could repo, so we should re-read it. FIXME: we could
also just clear the 'rev' field... */ also just clear the 'rev' field... */
auto prevLockedRef = flake.lockedRef; auto prevLockedRef = flake.lockedRef;
FlakeCache dummyCache; flake = getFlake(state, topRef, useRegistries);
flake = getFlake(state, topRef, useRegistries, dummyCache);
if (lockFlags.commitLockFile && if (lockFlags.commitLockFile &&
flake.lockedRef.input.getRev() && flake.lockedRef.input.getRev() &&
@ -979,9 +1064,11 @@ static RegisterPrimOp r4({
} }
std::optional<Fingerprint> LockedFlake::getFingerprint(ref<Store> store) const std::optional<Fingerprint> LockedFlake::getFingerprint(
ref<Store> store,
const fetchers::Settings & fetchSettings) const
{ {
if (lockFile.isUnlocked()) return std::nullopt; if (lockFile.isUnlocked(fetchSettings)) return std::nullopt;
auto fingerprint = flake.lockedRef.input.getFingerprint(store); auto fingerprint = flake.lockedRef.input.getFingerprint(store);
if (!fingerprint) return std::nullopt; if (!fingerprint) return std::nullopt;
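As a reading aid for the override handling earlier in this file's changes: a relative `path:` override declared inside a `flake.nix` is resolved against the flake that declares it, while a `--override-input` given on the command line is resolved against the top-level flake. A minimal sketch of the in-flake form, assuming a hypothetical input `dep` whose own input `utils` is redirected to a subdirectory of the declaring flake:

```nix
{
  inputs = {
    dep.url = "github:example/dep";              # illustrative upstream flake
    # Relative override: resolved against *this* flake's directory,
    # i.e. it points at ./vendor/utils of the same source tree.
    dep.inputs.utils.url = "path:./vendor/utils";
  };

  outputs = { self, dep, ... }: { };
}
```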

View file

@ -110,7 +110,7 @@ struct Flake
} }
}; };
Flake getFlake(EvalState & state, const FlakeRef & flakeRef, bool allowLookup); Flake getFlake(EvalState & state, const FlakeRef & flakeRef, bool useRegistries);
/** /**
* Fingerprint of a locked flake; used as a cache key. * Fingerprint of a locked flake; used as a cache key.
@ -129,7 +129,9 @@ struct LockedFlake
*/ */
std::map<ref<Node>, SourcePath> nodePaths; std::map<ref<Node>, SourcePath> nodePaths;
std::optional<Fingerprint> getFingerprint(ref<Store> store) const; std::optional<Fingerprint> getFingerprint(
ref<Store> store,
const fetchers::Settings & fetchSettings) const;
}; };
struct LockFlags struct LockFlags

View file

@ -48,9 +48,10 @@ FlakeRef parseFlakeRef(
const std::string & url, const std::string & url,
const std::optional<Path> & baseDir, const std::optional<Path> & baseDir,
bool allowMissing, bool allowMissing,
bool isFlake) bool isFlake,
bool preserveRelativePaths)
{ {
auto [flakeRef, fragment] = parseFlakeRefWithFragment(fetchSettings, url, baseDir, allowMissing, isFlake); auto [flakeRef, fragment] = parseFlakeRefWithFragment(fetchSettings, url, baseDir, allowMissing, isFlake, preserveRelativePaths);
if (fragment != "") if (fragment != "")
throw Error("unexpected fragment '%s' in flake reference '%s'", fragment, url); throw Error("unexpected fragment '%s' in flake reference '%s'", fragment, url);
return flakeRef; return flakeRef;
@ -87,7 +88,8 @@ std::pair<FlakeRef, std::string> parsePathFlakeRefWithFragment(
const std::string & url, const std::string & url,
const std::optional<Path> & baseDir, const std::optional<Path> & baseDir,
bool allowMissing, bool allowMissing,
bool isFlake) bool isFlake,
bool preserveRelativePaths)
{ {
static std::regex pathFlakeRegex( static std::regex pathFlakeRegex(
R"(([^?#]*)(\?([^#]*))?(#(.*))?)", R"(([^?#]*)(\?([^#]*))?(#(.*))?)",
@ -178,9 +180,8 @@ std::pair<FlakeRef, std::string> parsePathFlakeRefWithFragment(
} }
} else { } else {
if (!hasPrefix(path, "/")) if (!preserveRelativePaths && !isAbsolute(path))
throw BadURL("flake reference '%s' is not an absolute path", url); throw BadURL("flake reference '%s' is not an absolute path", url);
path = canonPath(path + "/" + getOr(query, "dir", ""));
} }
return fromParsedURL(fetchSettings, { return fromParsedURL(fetchSettings, {
@ -199,8 +200,7 @@ std::pair<FlakeRef, std::string> parsePathFlakeRefWithFragment(
static std::optional<std::pair<FlakeRef, std::string>> parseFlakeIdRef( static std::optional<std::pair<FlakeRef, std::string>> parseFlakeIdRef(
const fetchers::Settings & fetchSettings, const fetchers::Settings & fetchSettings,
const std::string & url, const std::string & url,
bool isFlake bool isFlake)
)
{ {
std::smatch match; std::smatch match;
@ -228,11 +228,15 @@ std::optional<std::pair<FlakeRef, std::string>> parseURLFlakeRef(
const fetchers::Settings & fetchSettings, const fetchers::Settings & fetchSettings,
const std::string & url, const std::string & url,
const std::optional<Path> & baseDir, const std::optional<Path> & baseDir,
bool isFlake bool isFlake)
)
{ {
try { try {
return fromParsedURL(fetchSettings, parseURL(url), isFlake); auto parsed = parseURL(url);
if (baseDir
&& (parsed.scheme == "path" || parsed.scheme == "git+file")
&& !isAbsolute(parsed.path))
parsed.path = absPath(parsed.path, *baseDir);
return fromParsedURL(fetchSettings, std::move(parsed), isFlake);
} catch (BadURL &) { } catch (BadURL &) {
return std::nullopt; return std::nullopt;
} }
@ -243,7 +247,8 @@ std::pair<FlakeRef, std::string> parseFlakeRefWithFragment(
const std::string & url, const std::string & url,
const std::optional<Path> & baseDir, const std::optional<Path> & baseDir,
bool allowMissing, bool allowMissing,
bool isFlake) bool isFlake,
bool preserveRelativePaths)
{ {
using namespace fetchers; using namespace fetchers;
@ -252,7 +257,7 @@ std::pair<FlakeRef, std::string> parseFlakeRefWithFragment(
} else if (auto res = parseURLFlakeRef(fetchSettings, url, baseDir, isFlake)) { } else if (auto res = parseURLFlakeRef(fetchSettings, url, baseDir, isFlake)) {
return *res; return *res;
} else { } else {
return parsePathFlakeRefWithFragment(fetchSettings, url, baseDir, allowMissing, isFlake); return parsePathFlakeRefWithFragment(fetchSettings, url, baseDir, allowMissing, isFlake, preserveRelativePaths);
} }
} }

View file

@ -84,7 +84,8 @@ FlakeRef parseFlakeRef(
const std::string & url, const std::string & url,
const std::optional<Path> & baseDir = {}, const std::optional<Path> & baseDir = {},
bool allowMissing = false, bool allowMissing = false,
bool isFlake = true); bool isFlake = true,
bool preserveRelativePaths = false);
/** /**
* @param baseDir Optional [base directory](https://nixos.org/manual/nix/unstable/glossary#gloss-base-directory) * @param baseDir Optional [base directory](https://nixos.org/manual/nix/unstable/glossary#gloss-base-directory)
@ -102,7 +103,8 @@ std::pair<FlakeRef, std::string> parseFlakeRefWithFragment(
const std::string & url, const std::string & url,
const std::optional<Path> & baseDir = {}, const std::optional<Path> & baseDir = {},
bool allowMissing = false, bool allowMissing = false,
bool isFlake = true); bool isFlake = true,
bool preserveRelativePaths = false);
/** /**
* @param baseDir Optional [base directory](https://nixos.org/manual/nix/unstable/glossary#gloss-base-directory) * @param baseDir Optional [base directory](https://nixos.org/manual/nix/unstable/glossary#gloss-base-directory)

View file

@ -10,6 +10,7 @@
#include <nlohmann/json.hpp> #include <nlohmann/json.hpp>
#include "strings.hh" #include "strings.hh"
#include "flake/settings.hh"
namespace nix::flake { namespace nix::flake {
@ -42,9 +43,10 @@ LockedNode::LockedNode(
: lockedRef(getFlakeRef(fetchSettings, json, "locked", "info")) // FIXME: remove "info" : lockedRef(getFlakeRef(fetchSettings, json, "locked", "info")) // FIXME: remove "info"
, originalRef(getFlakeRef(fetchSettings, json, "original", nullptr)) , originalRef(getFlakeRef(fetchSettings, json, "original", nullptr))
, isFlake(json.find("flake") != json.end() ? (bool) json["flake"] : true) , isFlake(json.find("flake") != json.end() ? (bool) json["flake"] : true)
, parentPath(json.find("parent") != json.end() ? (std::optional<InputPath>) json["parent"] : std::nullopt)
{ {
if (!lockedRef.input.isLocked()) if (!lockedRef.input.isConsideredLocked(fetchSettings) && !lockedRef.input.isRelative())
throw Error("lock file contains unlocked input '%s'", throw Error("Lock file contains unlocked input '%s'. Use '--allow-dirty-locks' to accept this lock file.",
fetchers::attrsToJSON(lockedRef.input.toAttrs())); fetchers::attrsToJSON(lockedRef.input.toAttrs()));
// For backward compatibility, lock file entries are implicitly final. // For backward compatibility, lock file entries are implicitly final.
@ -197,10 +199,12 @@ std::pair<nlohmann::json, LockFile::KeyMap> LockFile::toJSON() const
/* For backward compatibility, omit the "__final" /* For backward compatibility, omit the "__final"
attribute. We never allow non-final inputs in lock files attribute. We never allow non-final inputs in lock files
anyway. */ anyway. */
assert(lockedNode->lockedRef.input.isFinal()); assert(lockedNode->lockedRef.input.isFinal() || lockedNode->lockedRef.input.isRelative());
n["locked"].erase("__final"); n["locked"].erase("__final");
if (!lockedNode->isFlake) if (!lockedNode->isFlake)
n["flake"] = false; n["flake"] = false;
if (lockedNode->parentPath)
n["parent"] = *lockedNode->parentPath;
} }
nodes[key] = std::move(n); nodes[key] = std::move(n);
@ -228,7 +232,7 @@ std::ostream & operator <<(std::ostream & stream, const LockFile & lockFile)
return stream; return stream;
} }
std::optional<FlakeRef> LockFile::isUnlocked() const std::optional<FlakeRef> LockFile::isUnlocked(const fetchers::Settings & fetchSettings) const
{ {
std::set<ref<const Node>> nodes; std::set<ref<const Node>> nodes;
@ -247,7 +251,10 @@ std::optional<FlakeRef> LockFile::isUnlocked() const
for (auto & i : nodes) { for (auto & i : nodes) {
if (i == ref<const Node>(root)) continue; if (i == ref<const Node>(root)) continue;
auto node = i.dynamic_pointer_cast<const LockedNode>(); auto node = i.dynamic_pointer_cast<const LockedNode>();
if (node && (!node->lockedRef.input.isLocked() || !node->lockedRef.input.isFinal())) if (node
&& (!node->lockedRef.input.isConsideredLocked(fetchSettings)
|| !node->lockedRef.input.isFinal())
&& !node->lockedRef.input.isRelative())
return node->lockedRef; return node->lockedRef;
} }

View file

@ -38,11 +38,19 @@ struct LockedNode : Node
FlakeRef lockedRef, originalRef; FlakeRef lockedRef, originalRef;
bool isFlake = true; bool isFlake = true;
/* The node relative to which relative source paths
(e.g. 'path:../foo') are interpreted. */
std::optional<InputPath> parentPath;
LockedNode( LockedNode(
const FlakeRef & lockedRef, const FlakeRef & lockedRef,
const FlakeRef & originalRef, const FlakeRef & originalRef,
bool isFlake = true) bool isFlake = true,
: lockedRef(lockedRef), originalRef(originalRef), isFlake(isFlake) std::optional<InputPath> parentPath = {})
: lockedRef(lockedRef)
, originalRef(originalRef)
, isFlake(isFlake)
, parentPath(parentPath)
{ } { }
LockedNode( LockedNode(
@ -71,7 +79,7 @@ struct LockFile
* Check whether this lock file has any unlocked or non-final * Check whether this lock file has any unlocked or non-final
* inputs. If so, return one. * inputs. If so, return one.
*/ */
std::optional<FlakeRef> isUnlocked() const; std::optional<FlakeRef> isUnlocked(const fetchers::Settings & fetchSettings) const;
bool operator ==(const LockFile & other) const; bool operator ==(const LockFile & other) const;

View file

@ -29,7 +29,7 @@ struct Settings : public Config
this, this,
false, false,
"accept-flake-config", "accept-flake-config",
"Whether to accept nix configuration from a flake without prompting.", "Whether to accept Nix configuration settings from a flake without prompting.",
{}, {},
true, true,
Xp::Flakes}; Xp::Flakes};

View file

@ -1,5 +1,4 @@
{ lib { lib
, stdenv
, mkMesonLibrary , mkMesonLibrary
, nix-util , nix-util
@ -48,10 +47,6 @@ mkMesonLibrary (finalAttrs: {
echo ${version} > ../../.version echo ${version} > ../../.version
''; '';
env = lib.optionalAttrs (stdenv.isLinux && !(stdenv.hostPlatform.isStatic && stdenv.system == "aarch64-linux")) {
LDFLAGS = "-fuse-ld=gold";
};
meta = { meta = {
platforms = lib.platforms.unix ++ lib.platforms.windows; platforms = lib.platforms.unix ++ lib.platforms.windows;
}; };

View file

@ -1,5 +1,4 @@
{ lib { lib
, stdenv
, mkMesonLibrary , mkMesonLibrary
, nix-util-c , nix-util-c
@ -51,10 +50,6 @@ mkMesonLibrary (finalAttrs: {
mesonFlags = [ mesonFlags = [
]; ];
env = lib.optionalAttrs (stdenv.isLinux && !(stdenv.hostPlatform.isStatic && stdenv.system == "aarch64-linux")) {
LDFLAGS = "-fuse-ld=gold";
};
meta = { meta = {
platforms = lib.platforms.unix ++ lib.platforms.windows; platforms = lib.platforms.unix ++ lib.platforms.windows;
}; };

View file

@ -1,5 +1,4 @@
{ lib { lib
, stdenv
, mkMesonLibrary , mkMesonLibrary
, openssl , openssl
@ -45,10 +44,6 @@ mkMesonLibrary (finalAttrs: {
echo ${version} > ../../.version echo ${version} > ../../.version
''; '';
env = lib.optionalAttrs (stdenv.isLinux && !(stdenv.hostPlatform.isStatic && stdenv.system == "aarch64-linux")) {
LDFLAGS = "-fuse-ld=gold";
};
meta = { meta = {
platforms = lib.platforms.unix ++ lib.platforms.windows; platforms = lib.platforms.unix ++ lib.platforms.windows;
}; };

View file

@ -287,7 +287,6 @@ public:
else if (type == resBuildLogLine || type == resPostBuildLogLine) { else if (type == resBuildLogLine || type == resPostBuildLogLine) {
auto lastLine = chomp(getS(fields, 0)); auto lastLine = chomp(getS(fields, 0));
if (!lastLine.empty()) {
auto i = state->its.find(act); auto i = state->its.find(act);
assert(i != state->its.end()); assert(i != state->its.end());
ActInfo info = *i->second; ActInfo info = *i->second;
@ -305,7 +304,6 @@ public:
update(*state); update(*state);
} }
} }
}
else if (type == resUntrustedPath) { else if (type == resUntrustedPath) {
state->untrustedPaths++; state->untrustedPaths++;

View file

@ -1,5 +1,4 @@
{ lib { lib
, stdenv
, mkMesonLibrary , mkMesonLibrary
, nix-util-c , nix-util-c
@ -47,10 +46,6 @@ mkMesonLibrary (finalAttrs: {
mesonFlags = [ mesonFlags = [
]; ];
env = lib.optionalAttrs (stdenv.isLinux && !(stdenv.hostPlatform.isStatic && stdenv.system == "aarch64-linux")) {
LDFLAGS = "-fuse-ld=gold";
};
meta = { meta = {
platforms = lib.platforms.unix ++ lib.platforms.windows; platforms = lib.platforms.unix ++ lib.platforms.windows;
}; };

View file

@ -1,5 +1,4 @@
{ lib { lib
, stdenv
, mkMesonLibrary , mkMesonLibrary
, nix-util-test-support , nix-util-test-support
@ -51,10 +50,6 @@ mkMesonLibrary (finalAttrs: {
mesonFlags = [ mesonFlags = [
]; ];
env = lib.optionalAttrs (stdenv.isLinux && !(stdenv.hostPlatform.isStatic && stdenv.system == "aarch64-linux")) {
LDFLAGS = "-fuse-ld=gold";
};
meta = { meta = {
platforms = lib.platforms.unix ++ lib.platforms.windows; platforms = lib.platforms.unix ++ lib.platforms.windows;
}; };

View file

@ -62,10 +62,6 @@ mkMesonExecutable (finalAttrs: {
mesonFlags = [ mesonFlags = [
]; ];
env = lib.optionalAttrs (stdenv.isLinux && !(stdenv.hostPlatform.isStatic && stdenv.system == "aarch64-linux")) {
LDFLAGS = "-fuse-ld=gold";
};
passthru = { passthru = {
tests = { tests = {
run = let run = let

View file

@ -50,8 +50,9 @@ struct FileTransferSettings : Config
Setting<size_t> downloadBufferSize{this, 64 * 1024 * 1024, "download-buffer-size", Setting<size_t> downloadBufferSize{this, 64 * 1024 * 1024, "download-buffer-size",
R"( R"(
The size of Nix's internal download buffer during `curl` transfers. If data is The size of Nix's internal download buffer in bytes during `curl` transfers. If data is
not processed quickly enough to exceed the size of this buffer, downloads may stall. not processed quickly enough to exceed the size of this buffer, downloads may stall.
The default is 67108864 (64 MiB).
)"}; )"};
}; };
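A minimal sketch of raising this setting, assuming a NixOS-style configuration where `nix.settings` maps verbatim onto `nix.conf` entries (the value is illustrative):

```nix
{
  # 256 MiB instead of the 64 MiB default; helps when the consumer of the
  # downloaded data (e.g. decompression) is slower than the network.
  nix.settings.download-buffer-size = 268435456;
}
```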

View file

@ -455,7 +455,7 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
bool gcKeepOutputs = settings.gcKeepOutputs; bool gcKeepOutputs = settings.gcKeepOutputs;
bool gcKeepDerivations = settings.gcKeepDerivations; bool gcKeepDerivations = settings.gcKeepDerivations;
StorePathSet roots, dead, alive; std::unordered_set<StorePath> roots, dead, alive;
struct Shared struct Shared
{ {
@ -661,7 +661,7 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
} }
}; };
std::map<StorePath, StorePathSet> referrersCache; std::unordered_map<StorePath, StorePathSet> referrersCache;
/* Helper function that visits all paths reachable from `start` /* Helper function that visits all paths reachable from `start`
via the referrers edges and optionally derivers and derivation via the referrers edges and optionally derivers and derivation

View file

@ -291,7 +291,11 @@ json listNar(ref<SourceAccessor> accessor, const CanonPath & path, bool recurse)
obj["type"] = "symlink"; obj["type"] = "symlink";
obj["target"] = accessor->readLink(path); obj["target"] = accessor->readLink(path);
break; break;
case SourceAccessor::Type::tMisc: case SourceAccessor::Type::tBlock:
case SourceAccessor::Type::tChar:
case SourceAccessor::Type::tSocket:
case SourceAccessor::Type::tFifo:
case SourceAccessor::Type::tUnknown:
assert(false); // cannot happen for NARs assert(false); // cannot happen for NARs
} }
return obj; return obj;

View file

@ -87,8 +87,6 @@ mkMesonLibrary (finalAttrs: {
# https://github.com/NixOS/nixpkgs/issues/86131. # https://github.com/NixOS/nixpkgs/issues/86131.
BOOST_INCLUDEDIR = "${lib.getDev boost}/include"; BOOST_INCLUDEDIR = "${lib.getDev boost}/include";
BOOST_LIBRARYDIR = "${lib.getLib boost}/lib"; BOOST_LIBRARYDIR = "${lib.getLib boost}/lib";
} // lib.optionalAttrs (stdenv.isLinux && !(stdenv.hostPlatform.isStatic && stdenv.system == "aarch64-linux")) {
LDFLAGS = "-fuse-ld=gold";
}; };
meta = { meta = {

View file

@ -534,14 +534,17 @@ void RemoteStore::addToStore(const ValidPathInfo & info, Source & source,
void RemoteStore::addMultipleToStore( void RemoteStore::addMultipleToStore(
PathsSource & pathsToCopy, PathsSource && pathsToCopy,
Activity & act, Activity & act,
RepairFlag repair, RepairFlag repair,
CheckSigsFlag checkSigs) CheckSigsFlag checkSigs)
{ {
auto source = sinkToSource([&](Sink & sink) { auto source = sinkToSource([&](Sink & sink) {
sink << pathsToCopy.size(); sink << pathsToCopy.size();
for (auto & [pathInfo, pathSource] : pathsToCopy) { // Reverse, so we can release memory at the original start
std::reverse(pathsToCopy.begin(), pathsToCopy.end());
while (!pathsToCopy.empty()) {
auto & [pathInfo, pathSource] = pathsToCopy.back();
WorkerProto::Serialise<ValidPathInfo>::write(*this, WorkerProto::Serialise<ValidPathInfo>::write(*this,
WorkerProto::WriteConn { WorkerProto::WriteConn {
.to = sink, .to = sink,
@ -549,6 +552,7 @@ void RemoteStore::addMultipleToStore(
}, },
pathInfo); pathInfo);
pathSource->drainInto(sink); pathSource->drainInto(sink);
pathsToCopy.pop_back();
} }
}); });

View file

@ -102,7 +102,7 @@ public:
CheckSigsFlag checkSigs) override; CheckSigsFlag checkSigs) override;
void addMultipleToStore( void addMultipleToStore(
PathsSource & pathsToCopy, PathsSource && pathsToCopy,
Activity & act, Activity & act,
RepairFlag repair, RepairFlag repair,
CheckSigsFlag checkSigs) override; CheckSigsFlag checkSigs) override;

View file

@ -223,7 +223,7 @@ StorePath Store::addToStore(
} }
void Store::addMultipleToStore( void Store::addMultipleToStore(
PathsSource & pathsToCopy, PathsSource && pathsToCopy,
Activity & act, Activity & act,
RepairFlag repair, RepairFlag repair,
CheckSigsFlag checkSigs) CheckSigsFlag checkSigs)
@ -246,9 +246,7 @@ void Store::addMultipleToStore(
act.progress(nrDone, pathsToCopy.size(), nrRunning, nrFailed); act.progress(nrDone, pathsToCopy.size(), nrRunning, nrFailed);
}; };
ThreadPool pool; processGraph<StorePath>(
processGraph<StorePath>(pool,
storePathsToAdd, storePathsToAdd,
[&](const StorePath & path) { [&](const StorePath & path) {
@ -1028,12 +1026,10 @@ std::map<StorePath, StorePath> copyPaths(
} }
auto pathsMap = copyPaths(srcStore, dstStore, storePaths, repair, checkSigs, substitute); auto pathsMap = copyPaths(srcStore, dstStore, storePaths, repair, checkSigs, substitute);
ThreadPool pool;
try { try {
// Copy the realisation closure // Copy the realisation closure
processGraph<Realisation>( processGraph<Realisation>(
pool, Realisation::closure(srcStore, toplevelRealisations), Realisation::closure(srcStore, toplevelRealisations),
[&](const Realisation & current) -> std::set<Realisation> { [&](const Realisation & current) -> std::set<Realisation> {
std::set<Realisation> children; std::set<Realisation> children;
for (const auto & [drvOutput, _] : current.dependentRealisations) { for (const auto & [drvOutput, _] : current.dependentRealisations) {
@ -1142,7 +1138,7 @@ std::map<StorePath, StorePath> copyPaths(
pathsToCopy.push_back(std::pair{infoForDst, std::move(source)}); pathsToCopy.push_back(std::pair{infoForDst, std::move(source)});
} }
dstStore.addMultipleToStore(pathsToCopy, act, repair, checkSigs); dstStore.addMultipleToStore(std::move(pathsToCopy), act, repair, checkSigs);
return pathsMap; return pathsMap;
} }

View file

@ -425,7 +425,7 @@ public:
CheckSigsFlag checkSigs = CheckSigs); CheckSigsFlag checkSigs = CheckSigs);
virtual void addMultipleToStore( virtual void addMultipleToStore(
PathsSource & pathsToCopy, PathsSource && pathsToCopy,
Activity & act, Activity & act,
RepairFlag repair = NoRepair, RepairFlag repair = NoRepair,
CheckSigsFlag checkSigs = CheckSigs); CheckSigsFlag checkSigs = CheckSigs);

View file

@ -1,5 +1,4 @@
{ lib { lib
, stdenv
, mkMesonLibrary , mkMesonLibrary
, nix-util , nix-util
@ -45,10 +44,6 @@ mkMesonLibrary (finalAttrs: {
mesonFlags = [ mesonFlags = [
]; ];
env = lib.optionalAttrs (stdenv.isLinux && !(stdenv.hostPlatform.isStatic && stdenv.system == "aarch64-linux")) {
LDFLAGS = "-fuse-ld=gold";
};
meta = { meta = {
platforms = lib.platforms.unix ++ lib.platforms.windows; platforms = lib.platforms.unix ++ lib.platforms.windows;
}; };

View file

@ -1,5 +1,4 @@
{ lib { lib
, stdenv
, mkMesonLibrary , mkMesonLibrary
, nix-util , nix-util
@ -49,10 +48,6 @@ mkMesonLibrary (finalAttrs: {
mesonFlags = [ mesonFlags = [
]; ];
env = lib.optionalAttrs (stdenv.isLinux && !(stdenv.hostPlatform.isStatic && stdenv.system == "aarch64-linux")) {
LDFLAGS = "-fuse-ld=gold";
};
meta = { meta = {
platforms = lib.platforms.unix ++ lib.platforms.windows; platforms = lib.platforms.unix ++ lib.platforms.windows;
}; };

View file

@ -55,10 +55,6 @@ mkMesonExecutable (finalAttrs: {
mesonFlags = [ mesonFlags = [
]; ];
env = lib.optionalAttrs (stdenv.isLinux && !(stdenv.hostPlatform.isStatic && stdenv.system == "aarch64-linux")) {
LDFLAGS = "-fuse-ld=gold";
};
passthru = { passthru = {
tests = { tests = {
run = runCommand "${finalAttrs.pname}-run" { run = runCommand "${finalAttrs.pname}-run" {

View file

@ -31,12 +31,7 @@ namespace nix {
namespace fs { using namespace std::filesystem; } namespace fs { using namespace std::filesystem; }
/** bool isAbsolute(PathView path)
* Treat the string as possibly an absolute path, by inspecting the
* start of it. Return whether it was probably intended to be
* absolute.
*/
static bool isAbsolute(PathView path)
{ {
return fs::path { path }.is_absolute(); return fs::path { path }.is_absolute();
} }
@ -648,7 +643,7 @@ void setWriteTime(
// doesn't support access time just modification time. // doesn't support access time just modification time.
// //
// System clock vs File clock issues also make that annoying. // System clock vs File clock issues also make that annoying.
warn("Changing file times is not yet implemented on Windows, path is '%s'", path); warn("Changing file times is not yet implemented on Windows, path is %s", path);
#elif HAVE_UTIMENSAT && HAVE_DECL_AT_SYMLINK_NOFOLLOW #elif HAVE_UTIMENSAT && HAVE_DECL_AT_SYMLINK_NOFOLLOW
struct timespec times[2] = { struct timespec times[2] = {
{ {
@ -661,7 +656,7 @@ void setWriteTime(
}, },
}; };
if (utimensat(AT_FDCWD, path.c_str(), times, AT_SYMLINK_NOFOLLOW) == -1) if (utimensat(AT_FDCWD, path.c_str(), times, AT_SYMLINK_NOFOLLOW) == -1)
throw SysError("changing modification time of '%s' (using `utimensat`)", path); throw SysError("changing modification time of %s (using `utimensat`)", path);
#else #else
struct timeval times[2] = { struct timeval times[2] = {
{ {
@ -675,7 +670,7 @@ void setWriteTime(
}; };
#if HAVE_LUTIMES #if HAVE_LUTIMES
if (lutimes(path.c_str(), times) == -1) if (lutimes(path.c_str(), times) == -1)
throw SysError("changing modification time of '%s'", path); throw SysError("changing modification time of %s", path);
#else #else
bool isSymlink = optIsSymlink bool isSymlink = optIsSymlink
? *optIsSymlink ? *optIsSymlink
@ -683,9 +678,9 @@ void setWriteTime(
if (!isSymlink) { if (!isSymlink) {
if (utimes(path.c_str(), times) == -1) if (utimes(path.c_str(), times) == -1)
throw SysError("changing modification time of '%s' (not a symlink)", path); throw SysError("changing modification time of %s (not a symlink)", path);
} else { } else {
throw Error("Cannot modification time of symlink '%s'", path); throw Error("Cannot modification time of symlink %s", path);
} }
#endif #endif
#endif #endif
@ -714,7 +709,7 @@ void copyFile(const fs::path & from, const fs::path & to, bool andDelete)
copyFile(entry, to / entry.path().filename(), andDelete); copyFile(entry, to / entry.path().filename(), andDelete);
} }
} else { } else {
throw Error("file '%s' has an unsupported type", from); throw Error("file %s has an unsupported type", from);
} }
setWriteTime(to, lstat(from.string().c_str())); setWriteTime(to, lstat(from.string().c_str()));
@ -741,7 +736,7 @@ void moveFile(const Path & oldName, const Path & newName)
auto tempCopyTarget = temp / "copy-target"; auto tempCopyTarget = temp / "copy-target";
if (e.code().value() == EXDEV) { if (e.code().value() == EXDEV) {
fs::remove(newPath); fs::remove(newPath);
warn("Cant rename %s as %s, copying instead", oldName, newName); warn("cant rename %s as %s, copying instead", oldName, newName);
copyFile(oldPath, tempCopyTarget, true); copyFile(oldPath, tempCopyTarget, true);
std::filesystem::rename( std::filesystem::rename(
os_string_to_string(PathViewNG { tempCopyTarget }), os_string_to_string(PathViewNG { tempCopyTarget }),

View file

@ -42,6 +42,11 @@ namespace nix {
struct Sink; struct Sink;
struct Source; struct Source;
/**
* Return whether the path denotes an absolute path.
*/
bool isAbsolute(PathView path);
/** /**
* @return An absolutized path, resolving paths relative to the * @return An absolutized path, resolving paths relative to the
* specified directory, or the current directory otherwise. The path * specified directory, or the current directory otherwise. The path

View file

@ -49,11 +49,13 @@ void copyRecursive(
break; break;
} }
case SourceAccessor::tMisc: case SourceAccessor::tChar:
throw Error("file '%1%' has an unsupported type", from); case SourceAccessor::tBlock:
case SourceAccessor::tSocket:
case SourceAccessor::tFifo:
case SourceAccessor::tUnknown:
default: default:
unreachable(); throw Error("file '%1%' has an unsupported type '%2%'", from, stat.typeString());
} }
} }
@ -110,7 +112,7 @@ void RestoreSink::createRegularFile(const CanonPath & path, std::function<void(C
crf.startFsync = startFsync; crf.startFsync = startFsync;
crf.fd = crf.fd =
#ifdef _WIN32 #ifdef _WIN32
CreateFileW(p.c_str(), GENERIC_READ, FILE_SHARE_READ, NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL) CreateFileW(p.c_str(), GENERIC_READ | GENERIC_WRITE, FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, CREATE_NEW, FILE_ATTRIBUTE_NORMAL, NULL)
#else #else
open(p.c_str(), O_CREAT | O_EXCL | O_WRONLY | O_CLOEXEC, 0666) open(p.c_str(), O_CREAT | O_EXCL | O_WRONLY | O_CLOEXEC, 0666)
#endif #endif

View file

@ -200,7 +200,11 @@ std::optional<Mode> convertMode(SourceAccessor::Type type)
case SourceAccessor::tSymlink: return Mode::Symlink; case SourceAccessor::tSymlink: return Mode::Symlink;
case SourceAccessor::tRegular: return Mode::Regular; case SourceAccessor::tRegular: return Mode::Regular;
case SourceAccessor::tDirectory: return Mode::Directory; case SourceAccessor::tDirectory: return Mode::Directory;
case SourceAccessor::tMisc: return std::nullopt; case SourceAccessor::tChar:
case SourceAccessor::tBlock:
case SourceAccessor::tSocket:
case SourceAccessor::tFifo: return std::nullopt;
case SourceAccessor::tUnknown:
default: unreachable(); default: unreachable();
} }
} }
@ -314,9 +318,13 @@ Mode dump(
return Mode::Symlink; return Mode::Symlink;
} }
case SourceAccessor::tMisc: case SourceAccessor::tChar:
case SourceAccessor::tBlock:
case SourceAccessor::tSocket:
case SourceAccessor::tFifo:
case SourceAccessor::tUnknown:
default: default:
throw Error("file '%1%' has an unsupported type", path); throw Error("file '%1%' has an unsupported type of %2%", path, st.typeString());
} }
} }

View file

@ -72,8 +72,6 @@ mkMesonLibrary (finalAttrs: {
# https://github.com/NixOS/nixpkgs/issues/86131. # https://github.com/NixOS/nixpkgs/issues/86131.
BOOST_INCLUDEDIR = "${lib.getDev boost}/include"; BOOST_INCLUDEDIR = "${lib.getDev boost}/include";
BOOST_LIBRARYDIR = "${lib.getLib boost}/lib"; BOOST_LIBRARYDIR = "${lib.getLib boost}/lib";
} // lib.optionalAttrs (stdenv.isLinux && !(stdenv.hostPlatform.isStatic && stdenv.system == "aarch64-linux")) {
LDFLAGS = "-fuse-ld=gold";
}; };
meta = { meta = {

View file

@ -122,7 +122,13 @@ std::optional<SourceAccessor::Stat> PosixSourceAccessor::maybeLstat(const CanonP
S_ISREG(st->st_mode) ? tRegular : S_ISREG(st->st_mode) ? tRegular :
S_ISDIR(st->st_mode) ? tDirectory : S_ISDIR(st->st_mode) ? tDirectory :
S_ISLNK(st->st_mode) ? tSymlink : S_ISLNK(st->st_mode) ? tSymlink :
tMisc, S_ISCHR(st->st_mode) ? tChar :
S_ISBLK(st->st_mode) ? tBlock :
#ifdef S_ISSOCK
S_ISSOCK(st->st_mode) ? tSocket :
#endif
S_ISFIFO(st->st_mode) ? tFifo :
tUnknown,
.fileSize = S_ISREG(st->st_mode) ? std::optional<uint64_t>(st->st_size) : std::nullopt, .fileSize = S_ISREG(st->st_mode) ? std::optional<uint64_t>(st->st_size) : std::nullopt,
.isExecutable = S_ISREG(st->st_mode) && st->st_mode & S_IXUSR, .isExecutable = S_ISREG(st->st_mode) && st->st_mode & S_IXUSR,
}; };
@ -156,7 +162,11 @@ SourceAccessor::DirEntries PosixSourceAccessor::readDirectory(const CanonPath &
case std::filesystem::file_type::regular: return Type::tRegular; break; case std::filesystem::file_type::regular: return Type::tRegular; break;
case std::filesystem::file_type::symlink: return Type::tSymlink; break; case std::filesystem::file_type::symlink: return Type::tSymlink; break;
case std::filesystem::file_type::directory: return Type::tDirectory; break; case std::filesystem::file_type::directory: return Type::tDirectory; break;
default: return tMisc; case std::filesystem::file_type::character: return Type::tChar; break;
case std::filesystem::file_type::block: return Type::tBlock; break;
case std::filesystem::file_type::fifo: return Type::tFifo; break;
case std::filesystem::file_type::socket: return Type::tSocket; break;
default: return tUnknown;
} }
#pragma GCC diagnostic pop #pragma GCC diagnostic pop
}(); }();

View file

@ -5,6 +5,26 @@ namespace nix {
static std::atomic<size_t> nextNumber{0}; static std::atomic<size_t> nextNumber{0};
bool SourceAccessor::Stat::isNotNARSerialisable()
{
return this->type != tRegular && this->type != tSymlink && this->type != tDirectory;
}
std::string SourceAccessor::Stat::typeString() {
switch (this->type) {
case tRegular: return "regular";
case tSymlink: return "symlink";
case tDirectory: return "directory";
case tChar: return "character device";
case tBlock: return "block device";
case tSocket: return "socket";
case tFifo: return "fifo";
case tUnknown:
default: return "unknown";
}
return "unknown";
}
SourceAccessor::SourceAccessor() SourceAccessor::SourceAccessor()
: number(++nextNumber) : number(++nextNumber)
, displayPrefix{"«unknown»"} , displayPrefix{"«unknown»"}
@ -95,7 +115,7 @@ CanonPath SourceAccessor::resolveSymlinks(
throw Error("infinite symlink recursion in path '%s'", showPath(path)); throw Error("infinite symlink recursion in path '%s'", showPath(path));
auto target = readLink(res); auto target = readLink(res);
res.pop(); res.pop();
if (hasPrefix(target, "/")) if (isAbsolute(target))
res = CanonPath::root; res = CanonPath::root;
todo.splice(todo.begin(), tokenizeString<std::list<std::string>>(target, "/")); todo.splice(todo.begin(), tokenizeString<std::list<std::string>>(target, "/"));
} }

View file

@ -88,12 +88,13 @@ struct SourceAccessor : std::enable_shared_from_this<SourceAccessor>
Unlike `DT_UNKNOWN`, this must not be used for deferring the lookup of types. Unlike `DT_UNKNOWN`, this must not be used for deferring the lookup of types.
*/ */
tMisc tChar, tBlock, tSocket, tFifo,
tUnknown
}; };
struct Stat struct Stat
{ {
Type type = tMisc; Type type = tUnknown;
/** /**
* For regular files only: the size of the file. Not all * For regular files only: the size of the file. Not all
@ -112,6 +113,9 @@ struct SourceAccessor : std::enable_shared_from_this<SourceAccessor>
* file in the NAR. Only returned by NAR accessors. * file in the NAR. Only returned by NAR accessors.
*/ */
std::optional<uint64_t> narOffset; std::optional<uint64_t> narOffset;
bool isNotNARSerialisable();
std::string typeString();
}; };
Stat lstat(const CanonPath & path); Stat lstat(const CanonPath & path);

View file

@ -83,7 +83,6 @@ private:
*/ */
template<typename T> template<typename T>
void processGraph( void processGraph(
ThreadPool & pool,
const std::set<T> & nodes, const std::set<T> & nodes,
std::function<std::set<T>(const T &)> getEdges, std::function<std::set<T>(const T &)> getEdges,
std::function<void(const T &)> processNode) std::function<void(const T &)> processNode)
@ -97,6 +96,10 @@ void processGraph(
std::function<void(const T &)> worker; std::function<void(const T &)> worker;
/* Create pool last to ensure threads are stopped before other destructors
* run */
ThreadPool pool;
worker = [&](const T & node) { worker = [&](const T & node) {
{ {
@ -147,8 +150,16 @@ void processGraph(
} }
}; };
for (auto & node : nodes) for (auto & node : nodes) {
try {
pool.enqueue(std::bind(worker, std::ref(node))); pool.enqueue(std::bind(worker, std::ref(node)));
} catch (ThreadPoolShutDown &) {
/* Stop if the thread pool is shutting down. It means a
previous work item threw an exception, so process()
below will rethrow it. */
break;
}
}
pool.process(); pool.process();

View file

@ -501,9 +501,17 @@ static bool keep(PackageInfo & drv)
return drv.queryMetaBool("keep", false); return drv.queryMetaBool("keep", false);
} }
static void setMetaFlag(EvalState & state, PackageInfo & drv,
const std::string & name, const std::string & value)
{
auto v = state.allocValue();
v->mkString(value);
drv.setMeta(name, v);
}
static void installDerivations(Globals & globals, static void installDerivations(Globals & globals,
const Strings & args, const Path & profile) const Strings & args, const Path & profile, std::optional<int> priority)
{ {
debug("installing derivations"); debug("installing derivations");
@ -527,6 +535,11 @@ static void installDerivations(Globals & globals,
newNames.insert(DrvName(i.queryName()).name); newNames.insert(DrvName(i.queryName()).name);
} }
if (priority) {
for (auto & drv : newElems) {
setMetaFlag(*globals.state, drv, "priority", std::to_string((priority.value())));
}
}
while (true) { while (true) {
auto lockToken = optimisticLockProfile(profile); auto lockToken = optimisticLockProfile(profile);
@ -564,6 +577,7 @@ static void installDerivations(Globals & globals,
static void opInstall(Globals & globals, Strings opFlags, Strings opArgs) static void opInstall(Globals & globals, Strings opFlags, Strings opArgs)
{ {
std::optional<int> priority;
for (Strings::iterator i = opFlags.begin(); i != opFlags.end(); ) { for (Strings::iterator i = opFlags.begin(); i != opFlags.end(); ) {
auto arg = *i++; auto arg = *i++;
if (parseInstallSourceOptions(globals, i, opFlags, arg)) ; if (parseInstallSourceOptions(globals, i, opFlags, arg)) ;
@ -571,10 +585,17 @@ static void opInstall(Globals & globals, Strings opFlags, Strings opArgs)
globals.preserveInstalled = true; globals.preserveInstalled = true;
else if (arg == "--remove-all" || arg == "-r") else if (arg == "--remove-all" || arg == "-r")
globals.removeAll = true; globals.removeAll = true;
else if (arg == "--priority") {
if (i == opFlags.end())
throw UsageError("'%1%' requires an argument", arg);
priority = string2Int<int>(*i++);
if (!priority)
throw UsageError("'--priority' requires an integer argument");
}
else throw UsageError("unknown flag '%1%'", arg); else throw UsageError("unknown flag '%1%'", arg);
} }
installDerivations(globals, opArgs, globals.profile); installDerivations(globals, opArgs, globals.profile, priority);
} }
@ -689,15 +710,6 @@ static void opUpgrade(Globals & globals, Strings opFlags, Strings opArgs)
} }
static void setMetaFlag(EvalState & state, PackageInfo & drv,
const std::string & name, const std::string & value)
{
auto v = state.allocValue();
v->mkString(value);
drv.setMeta(name, v);
}
static void opSetFlag(Globals & globals, Strings opFlags, Strings opArgs) static void opSetFlag(Globals & globals, Strings opFlags, Strings opArgs)
{ {
if (opFlags.size() > 0) if (opFlags.size() > 0)
@ -1507,7 +1519,8 @@ static int main_nix_env(int argc, char * * argv)
opFlags.push_back(*arg); opFlags.push_back(*arg);
/* FIXME: hacky */ /* FIXME: hacky */
if (*arg == "--from-profile" || if (*arg == "--from-profile" ||
(op == opQuery && (*arg == "--attr" || *arg == "-A"))) (op == opQuery && (*arg == "--attr" || *arg == "-A")) ||
(op == opInstall && (*arg == "--priority")))
opFlags.push_back(getArg(*arg, arg, end)); opFlags.push_back(getArg(*arg, arg, end));
} }
else else
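The `--priority` handling above simply injects a `priority` meta attribute into the selected derivations before they are installed; declaring the attribute in a Nix expression has the same effect, since nix-env consults `meta.priority` when two installed packages collide (a lower value takes precedence). A hedged sketch of the declarative form (package and value are illustrative):

```nix
{ pkgs ? import <nixpkgs> { } }:

# Equivalent in effect to `--priority 5` at install time.
pkgs.hello.overrideAttrs (old: {
  meta = (old.meta or { }) // { priority = 5; };
})
```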

View file

@ -696,7 +696,7 @@ struct CmdDevelop : Common, MixEnvironment
auto sourcePath = installableFlake->getLockedFlake()->flake.resolvedRef.input.getSourcePath(); auto sourcePath = installableFlake->getLockedFlake()->flake.resolvedRef.input.getSourcePath();
if (sourcePath) { if (sourcePath) {
if (chdir(sourcePath->c_str()) == -1) { if (chdir(sourcePath->c_str()) == -1) {
throw SysError("chdir to '%s' failed", *sourcePath); throw SysError("chdir to %s failed", *sourcePath);
} }
} }
} }

View file

@ -238,7 +238,7 @@ struct CmdFlakeMetadata : FlakeCommand, MixJSON
j["lastModified"] = *lastModified; j["lastModified"] = *lastModified;
j["path"] = storePath; j["path"] = storePath;
j["locks"] = lockedFlake.lockFile.toJSON().first; j["locks"] = lockedFlake.lockFile.toJSON().first;
if (auto fingerprint = lockedFlake.getFingerprint(store)) if (auto fingerprint = lockedFlake.getFingerprint(store, fetchSettings))
j["fingerprint"] = fingerprint->to_string(HashFormat::Base16, false); j["fingerprint"] = fingerprint->to_string(HashFormat::Base16, false);
logger->cout("%s", j.dump()); logger->cout("%s", j.dump());
} else { } else {
@ -272,7 +272,7 @@ struct CmdFlakeMetadata : FlakeCommand, MixJSON
logger->cout( logger->cout(
ANSI_BOLD "Last modified:" ANSI_NORMAL " %s", ANSI_BOLD "Last modified:" ANSI_NORMAL " %s",
std::put_time(std::localtime(&*lastModified), "%F %T")); std::put_time(std::localtime(&*lastModified), "%F %T"));
if (auto fingerprint = lockedFlake.getFingerprint(store)) if (auto fingerprint = lockedFlake.getFingerprint(store, fetchSettings))
logger->cout( logger->cout(
ANSI_BOLD "Fingerprint:" ANSI_NORMAL " %s", ANSI_BOLD "Fingerprint:" ANSI_NORMAL " %s",
fingerprint->to_string(HashFormat::Base16, false)); fingerprint->to_string(HashFormat::Base16, false));
@ -941,7 +941,7 @@ struct CmdFlakeInitCommon : virtual Args, EvalCommand
createSymlink(target, os_string_to_string(PathViewNG { to2 })); createSymlink(target, os_string_to_string(PathViewNG { to2 }));
} }
else else
throw Error("file '%s' has unsupported type", from2); throw Error("path '%s' needs to be a symlink, file, or directory but instead is a %s", from2, st.typeString());
changedFiles.push_back(to2); changedFiles.push_back(to2);
notice("wrote: %s", to2); notice("wrote: %s", to2);
} }

View file

@ -187,7 +187,7 @@ Currently the `type` attribute can be one of the following:
* `nixpkgs/nixos-unstable/a3a3dda3bacf61e8a39258a0ed9c924eeca8e293` * `nixpkgs/nixos-unstable/a3a3dda3bacf61e8a39258a0ed9c924eeca8e293`
* `sub/dir` (if a flake named `sub` is in the registry) * `sub/dir` (if a flake named `sub` is in the registry)
* `path`: arbitrary local directories. The required attribute `path` * <a name="path-fetcher"></a>`path`: arbitrary local directories. The required attribute `path`
specifies the path of the flake. The URL form is specifies the path of the flake. The URL form is
``` ```
@ -200,18 +200,38 @@ Currently the `type` attribute can be one of the following:
If the flake at *path* is not inside a git repository, the `path:` If the flake at *path* is not inside a git repository, the `path:`
prefix is implied and can be omitted. prefix is implied and can be omitted.
*path* generally must be an absolute path. However, on the command
line, it can be a relative path (e.g. `.` or `./foo`) which is
interpreted as relative to the current directory. In this case, it
must start with `.` to avoid ambiguity with registry lookups
(e.g. `nixpkgs` is a registry lookup; `./nixpkgs` is a relative
path).

If *path* is a relative path (i.e. if it does not start with `/`),
it is interpreted as follows:

- If *path* is a command line argument, it is interpreted relative
  to the current directory.

- If *path* is used in a `flake.nix`, it is interpreted relative to
  the directory containing that `flake.nix`. However, the resolved
  path must be in the same tree. For instance, a `flake.nix` in the
  root of a tree can use `path:./foo` to access the flake in
  subdirectory `foo`, but `path:../bar` is illegal. On the other
  hand, a flake in the `/foo` directory of a tree can use
  `path:../bar` to refer to the flake in `/bar`.

Path inputs can be specified with path values in `flake.nix`. Path values are a syntax for `path` inputs, and they are converted by

1. resolving them into relative paths, relative to the base directory of `flake.nix`
2. escaping URL characters (percent-encoding, as defined by RFC 3986)
3. prepending `path:`

Note that the allowed syntax for path values in flake `inputs` may be more restrictive than general Nix, so you may need to use `path:` if your path contains certain special characters. See [Path literals](@docroot@/language/syntax.md#path-literal).

Note that if you omit `path:`, relative paths must start with `.` to
avoid ambiguity with registry lookups (e.g. `nixpkgs` is a registry
lookup; `./nixpkgs` is a relative path).
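To make these rules concrete, here is a minimal sketch (directory and input names are illustrative, and each subdirectory is assumed to contain its own `flake.nix`):

```nix
# ./flake.nix (root of the tree)
{
  inputs = {
    tools.url = ./tools;              # path value, converted to a relative `path:` URL
    # bad.url = "path:../elsewhere";  # illegal: resolves outside this source tree
  };
  outputs = { self, tools, ... }: { };
}
```

```nix
# ./tools/flake.nix (a subflake in the same tree)
{
  inputs.libfoo.url = "path:../libfoo";  # legal: still inside the tree
  outputs = { self, libfoo, ... }: { };
}
```

The lock file records such inputs with a `parent` field so that they can be resolved again relative to the referring flake.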
For example, these are valid path flake references: For example, these are valid path flake references:
* `path:/home/user/sub/dir` * `path:/home/user/sub/dir`
* `/home/user/sub/dir` (if `dir/flake.nix` is *not* in a git repository) * `/home/user/sub/dir` (if `dir/flake.nix` is *not* in a git repository)
* `./sub/dir` (when used on the command line and `dir/flake.nix` is *not* in a git repository) * `path:sub/dir`
* `./sub/dir`
* `path:../parent`
* `git`: Git repositories. The location of the repository is specified * `git`: Git repositories. The location of the repository is specified
by the attribute `url`. by the attribute `url`.

View file

@ -1,5 +1,4 @@
{ lib { lib
, stdenv
, mkMesonExecutable , mkMesonExecutable
, nix-store , nix-store
@ -99,10 +98,6 @@ mkMesonExecutable (finalAttrs: {
mesonFlags = [ mesonFlags = [
]; ];
env = lib.optionalAttrs (stdenv.isLinux && !(stdenv.hostPlatform.isStatic && stdenv.system == "aarch64-linux")) {
LDFLAGS = "-fuse-ld=gold";
};
meta = { meta = {
platforms = lib.platforms.unix ++ lib.platforms.windows; platforms = lib.platforms.unix ++ lib.platforms.windows;
}; };

View file

@ -15,7 +15,7 @@ using namespace nix;
struct CmdUpgradeNix : MixDryRun, StoreCommand struct CmdUpgradeNix : MixDryRun, StoreCommand
{ {
Path profileDir; std::filesystem::path profileDir;
CmdUpgradeNix() CmdUpgradeNix()
{ {
@ -64,7 +64,7 @@ struct CmdUpgradeNix : MixDryRun, StoreCommand
if (profileDir == "") if (profileDir == "")
profileDir = getProfileDir(store); profileDir = getProfileDir(store);
printInfo("upgrading Nix in profile '%s'", profileDir); printInfo("upgrading Nix in profile %s", profileDir);
auto storePath = getLatestNix(store); auto storePath = getLatestNix(store);
@ -93,40 +93,44 @@ struct CmdUpgradeNix : MixDryRun, StoreCommand
{ {
Activity act(*logger, lvlInfo, actUnknown, Activity act(*logger, lvlInfo, actUnknown,
fmt("installing '%s' into profile '%s'...", store->printStorePath(storePath), profileDir)); fmt("installing '%s' into profile %s...", store->printStorePath(storePath), profileDir));
// FIXME: don't call an external process.
runProgram(getNixBin("nix-env").string(), false, runProgram(getNixBin("nix-env").string(), false,
{"--profile", profileDir, "-i", store->printStorePath(storePath), "--no-sandbox"}); {"--profile", profileDir.string(), "-i", store->printStorePath(storePath), "--no-sandbox"});
} }
printInfo(ANSI_GREEN "upgrade to version %s done" ANSI_NORMAL, version); printInfo(ANSI_GREEN "upgrade to version %s done" ANSI_NORMAL, version);
} }
/* Return the profile in which Nix is installed. */ /* Return the profile in which Nix is installed. */
Path getProfileDir(ref<Store> store) std::filesystem::path getProfileDir(ref<Store> store)
{ {
auto whereOpt = ExecutablePath::load().findName(OS_STR("nix-env")); auto whereOpt = ExecutablePath::load().findName(OS_STR("nix-env"));
if (!whereOpt) if (!whereOpt)
throw Error("couldn't figure out how Nix is installed, so I can't upgrade it"); throw Error("couldn't figure out how Nix is installed, so I can't upgrade it");
const auto & where = whereOpt->parent_path(); const auto & where = whereOpt->parent_path();
printInfo("found Nix in '%s'", where); printInfo("found Nix in %s", where);
if (hasPrefix(where.string(), "/run/current-system")) if (hasPrefix(where.string(), "/run/current-system"))
throw Error("Nix on NixOS must be upgraded via 'nixos-rebuild'"); throw Error("Nix on NixOS must be upgraded via 'nixos-rebuild'");
Path profileDir = where.parent_path().string(); auto profileDir = where.parent_path();
// Resolve profile to /nix/var/nix/profiles/<name> link. // Resolve profile to /nix/var/nix/profiles/<name> link.
while (canonPath(profileDir).find("/profiles/") == std::string::npos && std::filesystem::is_symlink(profileDir)) while (canonPath(profileDir.string()).find("/profiles/") == std::string::npos && std::filesystem::is_symlink(profileDir))
profileDir = readLink(profileDir); profileDir = readLink(profileDir.string());
printInfo("found profile '%s'", profileDir); printInfo("found profile %s", profileDir);
Path userEnv = canonPath(profileDir, true); Path userEnv = canonPath(profileDir.string(), true);
if (where.filename() != "bin" || if (std::filesystem::exists(profileDir / "manifest.json"))
!hasSuffix(userEnv, "user-environment")) throw Error("directory %s is managed by 'nix profile' and currently cannot be upgraded by 'nix upgrade-nix'", profileDir);
throw Error("directory %s does not appear to be part of a Nix profile", where);
if (!std::filesystem::exists(profileDir / "manifest.nix"))
throw Error("directory %s does not appear to be part of a Nix profile", profileDir);
if (!store->isValidPath(store->parseStorePath(userEnv))) if (!store->isValidPath(store->parseStorePath(userEnv)))
throw Error("directory '%s' is not in the Nix store", userEnv); throw Error("directory '%s' is not in the Nix store", userEnv);

View file

@ -82,6 +82,9 @@ nix_store_dep = dependency('nix-store')
# pkgconfig available, are not in a standard location, # pkgconfig available, are not in a standard location,
# and are installed into a version folder. Use the # and are installed into a version folder. Use the
# Perl binary to give hints about perl include dir. # Perl binary to give hints about perl include dir.
#
# Note that until we have a better solution for this, cross-compiling
# the perl bindings does not appear to be possible.
#------------------------------------------------- #-------------------------------------------------
perl_archname = run_command( perl_archname = run_command(
perl, '-e', 'use Config; print $Config{archname};', check: true).stdout() perl, '-e', 'use Config; print $Config{archname};', check: true).stdout()

View file

@ -76,3 +76,21 @@ git -C "$rootRepo" commit -m "Add flake.nix"
storePath=$(nix flake metadata --json "$rootRepo?submodules=1" | jq -r .path) storePath=$(nix flake metadata --json "$rootRepo?submodules=1" | jq -r .path)
[[ -e "$storePath/submodule" ]] [[ -e "$storePath/submodule" ]]
# The root repo may use the submodule repo as an input
# through the relative path. This may change in the future;
# see: https://discourse.nixos.org/t/57783 and #9708.
cat > "$rootRepo"/flake.nix <<EOF
{
inputs.subRepo.url = "git+file:./submodule";
outputs = { ... }: { };
}
EOF
git -C "$rootRepo" add flake.nix
git -C "$rootRepo" commit -m "Add subRepo input"
(
cd "$rootRepo"
# The submodule must be locked to the relative path,
# _not_ the absolute path:
[[ $(nix flake metadata --json | jq -r .locks.nodes.subRepo.locked.url) = "file:./submodule" ]]
)

View file

@ -97,6 +97,9 @@ nix build -o "$TEST_ROOT/result" flake1
nix build -o "$TEST_ROOT/result" "$flake1Dir" nix build -o "$TEST_ROOT/result" "$flake1Dir"
nix build -o "$TEST_ROOT/result" "git+file://$flake1Dir" nix build -o "$TEST_ROOT/result" "git+file://$flake1Dir"
(cd "$flake1Dir" && nix build -o "$TEST_ROOT/result" ".")
(cd "$flake1Dir" && nix build -o "$TEST_ROOT/result" "path:.")
(cd "$flake1Dir" && nix build -o "$TEST_ROOT/result" "git+file:.")
# Test explicit packages.default. # Test explicit packages.default.
nix build -o "$TEST_ROOT/result" "$flake1Dir#default" nix build -o "$TEST_ROOT/result" "$flake1Dir#default"
@ -106,6 +109,15 @@ nix build -o "$TEST_ROOT/result" "git+file://$flake1Dir#default"
nix build -o "$TEST_ROOT/result" "$flake1Dir?ref=HEAD#default" nix build -o "$TEST_ROOT/result" "$flake1Dir?ref=HEAD#default"
nix build -o "$TEST_ROOT/result" "git+file://$flake1Dir?ref=HEAD#default" nix build -o "$TEST_ROOT/result" "git+file://$flake1Dir?ref=HEAD#default"
# Check that relative paths are allowed for git flakes.
# This may change in the future once git submodule support is refined.
# See: https://discourse.nixos.org/t/57783 and #9708.
(
# This `cd` should not be required and is indicative of aforementioned bug.
cd "$flake1Dir/.."
nix build -o "$TEST_ROOT/result" "git+file:./$(basename "$flake1Dir")"
)
# Check that store symlinks inside a flake are not interpreted as flakes. # Check that store symlinks inside a flake are not interpreted as flakes.
nix build -o "$flake1Dir/result" "git+file://$flake1Dir" nix build -o "$flake1Dir/result" "git+file://$flake1Dir"
nix path-info "$flake1Dir/result" nix path-info "$flake1Dir/result"

View file

@ -2,9 +2,6 @@
source ./common.sh source ./common.sh
# FIXME: this test is disabled because relative path flakes are broken. Re-enable this in #10089.
exit 0
requireGit requireGit
flakeFollowsA=$TEST_ROOT/follows/flakeA flakeFollowsA=$TEST_ROOT/follows/flakeA
@ -120,7 +117,7 @@ nix flake lock $flakeFollowsA
[[ $(jq -c .nodes.B.inputs.foobar $flakeFollowsA/flake.lock) = '"foobar"' ]] [[ $(jq -c .nodes.B.inputs.foobar $flakeFollowsA/flake.lock) = '"foobar"' ]]
jq -r -c '.nodes | keys | .[]' $flakeFollowsA/flake.lock | grep "^foobar$" jq -r -c '.nodes | keys | .[]' $flakeFollowsA/flake.lock | grep "^foobar$"
# Ensure a relative path is not allowed to go outside the store path # Check that path: inputs cannot escape from their root.
cat > $flakeFollowsA/flake.nix <<EOF cat > $flakeFollowsA/flake.nix <<EOF
{ {
description = "Flake A"; description = "Flake A";
@ -133,7 +130,28 @@ EOF
git -C $flakeFollowsA add flake.nix git -C $flakeFollowsA add flake.nix
expect 1 nix flake lock $flakeFollowsA 2>&1 | grep 'points outside' expect 1 nix flake lock $flakeFollowsA 2>&1 | grep '/flakeB.*is forbidden in pure evaluation mode'
expect 1 nix flake lock --impure $flakeFollowsA 2>&1 | grep '/flakeB.*does not exist'
# Test relative non-flake inputs.
cat > $flakeFollowsA/flake.nix <<EOF
{
description = "Flake A";
inputs = {
E.flake = false;
E.url = "./foo.nix"; # test relative paths without 'path:'
};
outputs = { E, ... }: { e = import E; };
}
EOF
echo 123 > $flakeFollowsA/foo.nix
git -C $flakeFollowsA add flake.nix foo.nix
nix flake lock $flakeFollowsA
[[ $(nix eval --json $flakeFollowsA#e) = 123 ]]
# Non-existent follows should print a warning. # Non-existent follows should print a warning.
cat >$flakeFollowsA/flake.nix <<EOF cat >$flakeFollowsA/flake.nix <<EOF
@ -338,6 +356,6 @@ json=$(nix flake metadata "$flakeFollowsCustomUrlA" --json)
rm "$flakeFollowsCustomUrlA"/flake.lock rm "$flakeFollowsCustomUrlA"/flake.lock
# if override-input is specified, lock "original" entry should contain original url # if override-input is specified, lock "original" entry should contain original url
json=$(nix flake metadata "$flakeFollowsCustomUrlA" --override-input B/C "path:./flakeB/flakeD" --json) json=$(nix flake metadata "$flakeFollowsCustomUrlA" --override-input B/C "$flakeFollowsCustomUrlD" --json)
echo "$json" | jq .locks.nodes.C.original echo "$json" | jq .locks.nodes.C.original
[[ $(echo "$json" | jq -r .locks.nodes.C.original.path) = './flakeC' ]] [[ $(echo "$json" | jq -r .locks.nodes.C.original.path) = './flakeC' ]]

View file

@ -27,6 +27,7 @@ suites += {
'shebang.sh', 'shebang.sh',
'commit-lock-file-summary.sh', 'commit-lock-file-summary.sh',
'non-flake-inputs.sh', 'non-flake-inputs.sh',
'relative-paths.sh',
], ],
'workdir': meson.current_source_dir(), 'workdir': meson.current_source_dir(),
} }

View file

@ -0,0 +1,97 @@
#!/usr/bin/env bash
source ./common.sh
requireGit
rootFlake="$TEST_ROOT/flake1"
subflake0="$rootFlake/sub0"
subflake1="$rootFlake/sub1"
subflake2="$rootFlake/sub2"
rm -rf "$rootFlake"
mkdir -p "$rootFlake" "$subflake0" "$subflake1" "$subflake2"
cat > "$rootFlake/flake.nix" <<EOF
{
inputs.sub0.url = ./sub0;
outputs = { self, sub0 }: {
x = 2;
y = self.x * sub0.x;
};
}
EOF
cat > "$subflake0/flake.nix" <<EOF
{
outputs = { self }: {
x = 7;
};
}
EOF
[[ $(nix eval "$rootFlake#x") = 2 ]]
[[ $(nix eval "$rootFlake#y") = 14 ]]
cat > "$subflake1/flake.nix" <<EOF
{
inputs.root.url = "../";
outputs = { self, root }: {
x = 3;
y = self.x * root.x;
};
}
EOF
[[ $(nix eval "$rootFlake?dir=sub1#y") = 6 ]]
git init "$rootFlake"
git -C "$rootFlake" add flake.nix sub0/flake.nix sub1/flake.nix
[[ $(nix eval "$subflake1#y") = 6 ]]
cat > "$subflake2/flake.nix" <<EOF
{
inputs.root.url = ./..;
inputs.sub1.url = "../sub1";
outputs = { self, root, sub1 }: {
x = 5;
y = self.x * sub1.x;
};
}
EOF
git -C "$rootFlake" add flake.nix sub2/flake.nix
[[ $(nix eval "$subflake2#y") = 15 ]]
# Make sure that this still works after committing the lock file.
git -C "$rootFlake" add sub2/flake.lock
[[ $(nix eval "$subflake2#y") = 15 ]]
# Make sure there are no content locks for relative path flakes.
(! grep "$TEST_ROOT" "$subflake2/flake.lock")
if ! isTestOnNixOS; then
(! grep "$NIX_STORE_DIR" "$subflake2/flake.lock")
fi
(! grep narHash "$subflake2/flake.lock")
# Test circular relative path flakes. FIXME: doesn't work at the moment.
if false; then
cat > "$rootFlake/flake.nix" <<EOF
{
inputs.sub1.url = "./sub1";
  inputs.sub2.url = "./sub2";
outputs = { self, sub1, sub2 }: {
x = 2;
y = self.x * sub1.x * sub2.x;
z = sub1.y * sub2.y;
};
}
EOF
[[ $(nix eval "$rootFlake#y") = 30 ]]
[[ $(nix eval "$rootFlake#z") = 90 ]]
fi

View file

@ -31,5 +31,14 @@ echo 456 > "$flake1Dir"/x.nix
[[ $(nix eval --json "$flake2Dir#x" --override-input flake1 "$TEST_ROOT/flake1") = 456 ]] [[ $(nix eval --json "$flake2Dir#x" --override-input flake1 "$TEST_ROOT/flake1") = 456 ]]
# Dirty overrides require --allow-dirty-locks.
expectStderr 1 nix flake lock "$flake2Dir" --override-input flake1 "$TEST_ROOT/flake1" | expectStderr 1 nix flake lock "$flake2Dir" --override-input flake1 "$TEST_ROOT/flake1" |
grepQuiet "cannot write lock file.*because it has an unlocked input" grepQuiet "Will not write lock file.*because it has an unlocked input"
nix flake lock "$flake2Dir" --override-input flake1 "$TEST_ROOT/flake1" --allow-dirty-locks
# Using a lock file with a dirty lock requires --allow-dirty-locks as well.
expectStderr 1 nix eval "$flake2Dir#x" |
grepQuiet "Lock file contains unlocked input"
[[ $(nix eval "$flake2Dir#x" --allow-dirty-locks) = 456 ]]

View file

@ -173,13 +173,21 @@ nix-env -q '*' | grepQuiet bar-0.1.1
# Test priorities: foo-0.1 has a lower priority than foo-1.0, so it # Test priorities: foo-0.1 has a lower priority than foo-1.0, so it
# should be possible to install both without a collision. Also test # should be possible to install both without a collision. Also test
# --set-flag priority to manually override the declared priorities. # '-i --priority' and '--set-flag priority' to manually override the
# declared priorities.
nix-env -e '*' nix-env -e '*'
nix-env -i foo-0.1 foo-1.0 nix-env -i foo-0.1 foo-1.0
[ "$($profiles/test/bin/foo)" = "foo-1.0" ] [ "$($profiles/test/bin/foo)" = "foo-1.0" ]
nix-env --set-flag priority 1 foo-0.1 nix-env --set-flag priority 1 foo-0.1
[ "$($profiles/test/bin/foo)" = "foo-0.1" ] [ "$($profiles/test/bin/foo)" = "foo-0.1" ]
# Priorities can be overridden with the --priority flag
nix-env -e '*'
nix-env -i foo-1.0
[ "$($profiles/test/bin/foo)" = "foo-1.0" ]
nix-env -i --priority 1 foo-0.1
[ "$($profiles/test/bin/foo)" = "foo-0.1" ]
# Test nix-env --set. # Test nix-env --set.
nix-env --set $outPath10 nix-env --set $outPath10
[ "$(nix-store -q --resolve $profiles/test)" = $outPath10 ] [ "$(nix-store -q --resolve $profiles/test)" = $outPath10 ]

View file

@ -39,11 +39,14 @@
client.succeed("chmod 600 /root/.ssh/id_ed25519") client.succeed("chmod 600 /root/.ssh/id_ed25519")
# Install the SSH key on the builders. # Install the SSH key on the builders.
client.wait_for_unit("network.target") client.wait_for_unit("network-online.target")
remote.succeed("mkdir -p -m 700 /root/.ssh") remote.succeed("mkdir -p -m 700 /root/.ssh")
remote.copy_from_host("key.pub", "/root/.ssh/authorized_keys") remote.copy_from_host("key.pub", "/root/.ssh/authorized_keys")
remote.wait_for_unit("sshd") remote.wait_for_unit("sshd")
remote.wait_for_unit("multi-user.target")
remote.wait_for_unit("network-online.target")
client.wait_for_unit("network-online.target")
client.succeed(f"ssh -o StrictHostKeyChecking=no {remote.name} 'echo hello world'") client.succeed(f"ssh -o StrictHostKeyChecking=no {remote.name} 'echo hello world'")
remote.succeed(""" remote.succeed("""

View file

@ -161,7 +161,9 @@ in
github.succeed("cat /var/log/httpd/*.log >&2") github.succeed("cat /var/log/httpd/*.log >&2")
github.wait_for_unit("httpd.service") github.wait_for_unit("httpd.service")
github.wait_for_unit("network-online.target")
client.wait_for_unit("network-online.target")
client.succeed("curl -v https://github.com/ >&2") client.succeed("curl -v https://github.com/ >&2")
out = client.succeed("nix registry list") out = client.succeed("nix registry list")
print(out) print(out)

View file

@ -48,7 +48,10 @@ in {
server.succeed("mkdir -m 700 /root/.ssh") server.succeed("mkdir -m 700 /root/.ssh")
server.copy_from_host("key.pub", "/root/.ssh/authorized_keys") server.copy_from_host("key.pub", "/root/.ssh/authorized_keys")
server.wait_for_unit("sshd") server.wait_for_unit("sshd")
client.wait_for_unit("network.target") server.wait_for_unit("multi-user.target")
server.wait_for_unit("network-online.target")
client.wait_for_unit("network-online.target")
client.succeed(f"ssh -o StrictHostKeyChecking=no {server.name} 'echo hello world'") client.succeed(f"ssh -o StrictHostKeyChecking=no {server.name} 'echo hello world'")
# Copy the closure of package A from the client to the server. # Copy the closure of package A from the client to the server.

View file

@ -56,7 +56,10 @@ in {
start_all() start_all()
server.wait_for_unit("sshd") server.wait_for_unit("sshd")
client.wait_for_unit("network.target") server.wait_for_unit("multi-user.target")
server.wait_for_unit("network-online.target")
client.wait_for_unit("network-online.target")
client.wait_for_unit("getty@tty1.service") client.wait_for_unit("getty@tty1.service")
# Either the prompt: ]# # Either the prompt: ]#
# or an OCR misreading of it: 1# # or an OCR misreading of it: 1#

View file

@ -37,6 +37,7 @@ in {
testScript = { nodes }: '' testScript = { nodes }: ''
cache.wait_for_unit("harmonia.service") cache.wait_for_unit("harmonia.service")
cache.wait_for_unit("network-online.target")
machine.succeed("mkdir -p /etc/containers") machine.succeed("mkdir -p /etc/containers")
machine.succeed("""echo '{"default":[{"type":"insecureAcceptAnything"}]}' > /etc/containers/policy.json""") machine.succeed("""echo '{"default":[{"type":"insecureAcceptAnything"}]}' > /etc/containers/policy.json""")

View file

@ -102,6 +102,7 @@ in
}; };
testScript = { nodes, ... }: '' testScript = { nodes, ... }: ''
http_dns.wait_for_unit("network-online.target")
http_dns.wait_for_unit("nginx") http_dns.wait_for_unit("nginx")
http_dns.wait_for_open_port(80) http_dns.wait_for_open_port(80)
http_dns.wait_for_unit("unbound") http_dns.wait_for_unit("unbound")
@ -109,6 +110,7 @@ in
client.start() client.start()
client.wait_for_unit('multi-user.target') client.wait_for_unit('multi-user.target')
client.wait_for_unit('network-online.target')
with subtest("can fetch data from a remote server outside sandbox"): with subtest("can fetch data from a remote server outside sandbox"):
client.succeed("nix --version >&2") client.succeed("nix --version >&2")

View file

@ -89,10 +89,13 @@ in
client.succeed("chmod 600 /root/.ssh/id_ed25519") client.succeed("chmod 600 /root/.ssh/id_ed25519")
# Install the SSH key on the builder. # Install the SSH key on the builder.
client.wait_for_unit("network.target") client.wait_for_unit("network-online.target")
builder.succeed("mkdir -p -m 700 /root/.ssh") builder.succeed("mkdir -p -m 700 /root/.ssh")
builder.copy_from_host("key.pub", "/root/.ssh/authorized_keys") builder.copy_from_host("key.pub", "/root/.ssh/authorized_keys")
builder.wait_for_unit("sshd") builder.wait_for_unit("sshd")
builder.wait_for_unit("multi-user.target")
builder.wait_for_unit("network-online.target")
client.succeed(f"ssh -o StrictHostKeyChecking=no {builder.name} 'echo hello world'") client.succeed(f"ssh -o StrictHostKeyChecking=no {builder.name} 'echo hello world'")
# Perform a build # Perform a build

View file

@ -112,11 +112,12 @@ in
client.succeed("chmod 600 /root/.ssh/id_ed25519") client.succeed("chmod 600 /root/.ssh/id_ed25519")
# Install the SSH key on the builders. # Install the SSH key on the builders.
client.wait_for_unit("network.target") client.wait_for_unit("network-online.target")
for builder in [builder1, builder2]: for builder in [builder1, builder2]:
builder.succeed("mkdir -p -m 700 /root/.ssh") builder.succeed("mkdir -p -m 700 /root/.ssh")
builder.copy_from_host("key.pub", "/root/.ssh/authorized_keys") builder.copy_from_host("key.pub", "/root/.ssh/authorized_keys")
builder.wait_for_unit("sshd") builder.wait_for_unit("sshd")
builder.wait_for_unit("network-online.target")
# Make sure the builder can handle our login correctly # Make sure the builder can handle our login correctly
builder.wait_for_unit("multi-user.target") builder.wait_for_unit("multi-user.target")
# Make sure there's no funny business on the client either # Make sure there's no funny business on the client either

View file

@ -52,12 +52,15 @@ in {
# Create a binary cache. # Create a binary cache.
server.wait_for_unit("minio") server.wait_for_unit("minio")
server.wait_for_unit("network-online.target")
server.succeed("mc config host add minio http://localhost:9000 ${accessKey} ${secretKey} --api s3v4") server.succeed("mc config host add minio http://localhost:9000 ${accessKey} ${secretKey} --api s3v4")
server.succeed("mc mb minio/my-cache") server.succeed("mc mb minio/my-cache")
server.succeed("${env} nix copy --to '${storeUrl}' ${pkgA}") server.succeed("${env} nix copy --to '${storeUrl}' ${pkgA}")
client.wait_for_unit("network-online.target")
# Test fetchurl on s3:// URLs while we're at it. # Test fetchurl on s3:// URLs while we're at it.
client.succeed("${env} nix eval --impure --expr 'builtins.fetchurl { name = \"foo\"; url = \"s3://my-cache/nix-cache-info?endpoint=http://server:9000&region=eu-west-1\"; }'") client.succeed("${env} nix eval --impure --expr 'builtins.fetchurl { name = \"foo\"; url = \"s3://my-cache/nix-cache-info?endpoint=http://server:9000&region=eu-west-1\"; }'")

View file

@ -122,6 +122,8 @@ in
start_all() start_all()
sourcehut.wait_for_unit("httpd.service") sourcehut.wait_for_unit("httpd.service")
sourcehut.wait_for_unit("network-online.target")
client.wait_for_unit("network-online.target")
client.succeed("curl -v https://git.sr.ht/ >&2") client.succeed("curl -v https://git.sr.ht/ >&2")
client.succeed("nix registry list | grep nixpkgs") client.succeed("nix registry list | grep nixpkgs")