diff --git a/Makefile b/Makefile
index 42d11638b..8675c9925 100644
--- a/Makefile
+++ b/Makefile
@@ -36,4 +36,4 @@ endif
include mk/lib.mk
-GLOBAL_CXXFLAGS += -g -Wall -include config.h -std=c++20 -I src
+GLOBAL_CXXFLAGS += -g -Wall -include config.h -std=c++2a -I src
diff --git a/doc/manual/custom.css b/doc/manual/custom.css
index 69d48d4a7..b90f5423f 100644
--- a/doc/manual/custom.css
+++ b/doc/manual/custom.css
@@ -5,3 +5,7 @@ h1:not(:first-of-type) {
h2 {
margin-top: 1em;
}
+
+.hljs-meta {
+ user-select: none;
+}
diff --git a/doc/manual/local.mk b/doc/manual/local.mk
index 4e559c352..16fd20d35 100644
--- a/doc/manual/local.mk
+++ b/doc/manual/local.mk
@@ -51,13 +51,13 @@ $(d)/src/SUMMARY.md: $(d)/src/SUMMARY.md.in $(d)/src/command-ref/new-cli
$(d)/src/command-ref/new-cli: $(d)/nix.json $(d)/generate-manpage.nix $(bindir)/nix
@rm -rf $@
$(trace-gen) $(nix-eval) --write-to $@.tmp --expr 'import doc/manual/generate-manpage.nix { toplevel = builtins.readFile $<; }'
- # @docroot@: https://nixos.org/manual/nix/unstable/contributing/hacking.html#docroot-variable
+ @# @docroot@: https://nixos.org/manual/nix/unstable/contributing/hacking.html#docroot-variable
$(trace-gen) sed -i $@.tmp/*.md -e 's^@docroot@^../..^g'
@mv $@.tmp $@
$(d)/src/command-ref/conf-file.md: $(d)/conf-file.json $(d)/generate-options.nix $(d)/src/command-ref/conf-file-prefix.md $(bindir)/nix
@cat doc/manual/src/command-ref/conf-file-prefix.md > $@.tmp
- # @docroot@: https://nixos.org/manual/nix/unstable/contributing/hacking.html#docroot-variable
+ @# @docroot@: https://nixos.org/manual/nix/unstable/contributing/hacking.html#docroot-variable
$(trace-gen) $(nix-eval) --expr 'import doc/manual/generate-options.nix (builtins.fromJSON (builtins.readFile $<))' \
| sed -e 's^@docroot@^..^g'>> $@.tmp
@mv $@.tmp $@
@@ -72,7 +72,7 @@ $(d)/conf-file.json: $(bindir)/nix
$(d)/src/language/builtins.md: $(d)/builtins.json $(d)/generate-builtins.nix $(d)/src/language/builtins-prefix.md $(bindir)/nix
@cat doc/manual/src/language/builtins-prefix.md > $@.tmp
- # @docroot@: https://nixos.org/manual/nix/unstable/contributing/hacking.html#docroot-variable
+ @# @docroot@: https://nixos.org/manual/nix/unstable/contributing/hacking.html#docroot-variable
$(trace-gen) $(nix-eval) --expr 'import doc/manual/generate-builtins.nix (builtins.fromJSON (builtins.readFile $<))' \
| sed -e 's^@docroot@^..^g' >> $@.tmp
@cat doc/manual/src/language/builtins-suffix.md >> $@.tmp
diff --git a/doc/manual/src/command-ref/env-common.md b/doc/manual/src/command-ref/env-common.md
index bb85a6b07..c5d38db47 100644
--- a/doc/manual/src/command-ref/env-common.md
+++ b/doc/manual/src/command-ref/env-common.md
@@ -91,3 +91,16 @@ Most Nix commands interpret the following environment variables:
variable sets the initial size of the heap in bytes. It defaults to
384 MiB. Setting it to a low value reduces memory consumption, but
will increase runtime due to the overhead of garbage collection.
+
+## XDG Base Directory
+
+New Nix commands conform to the [XDG Base Directory Specification], and use the following environment variables to determine locations of various state and configuration files:
+
+- [`XDG_CONFIG_HOME`]{#env-XDG_CONFIG_HOME} (default `~/.config`)
+- [`XDG_STATE_HOME`]{#env-XDG_STATE_HOME} (default `~/.local/state`)
+- [`XDG_CACHE_HOME`]{#env-XDG_CACHE_HOME} (default `~/.cache`)
+
+Classic Nix commands can also be made to follow this standard using the [`use-xdg-base-directories`] configuration option.
+
+[XDG Base Directory Specification]: https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html
+[`use-xdg-base-directories`]: ../command-ref/conf-file.md#conf-use-xdg-base-directories
\ No newline at end of file
diff --git a/doc/manual/src/language/index.md b/doc/manual/src/language/index.md
index 31300631c..3eabe1a02 100644
--- a/doc/manual/src/language/index.md
+++ b/doc/manual/src/language/index.md
@@ -91,7 +91,7 @@ This is an incomplete overview of language features, by example.
- `"hello ${ { a = "world" }.a }"`
+ `"hello ${ { a = "world"; }.a }"`
`"1 2 ${toString 3}"`
diff --git a/flake.nix b/flake.nix
index b2392c6a6..c4d6c863f 100644
--- a/flake.nix
+++ b/flake.nix
@@ -500,6 +500,8 @@
};
# System tests.
+ tests.authorization = runNixOSTestFor "x86_64-linux" ./tests/nixos/authorization.nix;
+
tests.remoteBuilds = runNixOSTestFor "x86_64-linux" ./tests/nixos/remote-builds.nix;
tests.nix-copy-closure = runNixOSTestFor "x86_64-linux" ./tests/nixos/nix-copy-closure.nix;
diff --git a/mk/libraries.mk b/mk/libraries.mk
index 6541775f3..02e4d47f9 100644
--- a/mk/libraries.mk
+++ b/mk/libraries.mk
@@ -67,6 +67,7 @@ define build-library
$(1)_LDFLAGS_USE :=
$(1)_LDFLAGS_USE_INSTALLED :=
+ $(1)_LIB_CLOSURE := $(1)
$$(eval $$(call create-dir, $$(_d)))
@@ -128,10 +129,12 @@ define build-library
+$$(trace-ld) $(LD) -Ur -o $$(_d)/$$($(1)_NAME).o $$^
$$(trace-ar) $(AR) crs $$@ $$(_d)/$$($(1)_NAME).o
- $(1)_LDFLAGS_USE += $$($(1)_PATH) $$($(1)_LDFLAGS)
+ $(1)_LDFLAGS_USE += $$($(1)_PATH) $$($(1)_LDFLAGS) $$(foreach lib, $$($(1)_LIBS), $$($$(lib)_LDFLAGS_USE))
$(1)_INSTALL_PATH := $$(libdir)/$$($(1)_NAME).a
+ $(1)_LIB_CLOSURE += $$($(1)_LIBS)
+
endif
$(1)_LDFLAGS_USE += $$($(1)_LDFLAGS_PROPAGATED)
diff --git a/mk/programs.mk b/mk/programs.mk
index 204409332..1ee1d3fa5 100644
--- a/mk/programs.mk
+++ b/mk/programs.mk
@@ -30,7 +30,7 @@ define build-program
_d := $(buildprefix)$$($(1)_DIR)
_srcs := $$(sort $$(foreach src, $$($(1)_SOURCES), $$(src)))
$(1)_OBJS := $$(addprefix $(buildprefix), $$(addsuffix .o, $$(basename $$(_srcs))))
- _libs := $$(foreach lib, $$($(1)_LIBS), $$($$(lib)_PATH))
+ _libs := $$(foreach lib, $$($(1)_LIBS), $$(foreach lib2, $$($$(lib)_LIB_CLOSURE), $$($$(lib2)_PATH)))
$(1)_PATH := $$(_d)/$$($(1)_NAME)
$$(eval $$(call create-dir, $$(_d)))
@@ -58,7 +58,7 @@ define build-program
else
$(DESTDIR)$$($(1)_INSTALL_PATH): $$($(1)_PATH) | $(DESTDIR)$$($(1)_INSTALL_DIR)/
- install -t $(DESTDIR)$$($(1)_INSTALL_DIR) $$<
+ +$$(trace-install) install -t $(DESTDIR)$$($(1)_INSTALL_DIR) $$<
endif
endif
diff --git a/perl/Makefile b/perl/Makefile
index 2d759e6fc..c2c95f255 100644
--- a/perl/Makefile
+++ b/perl/Makefile
@@ -1,6 +1,6 @@
makefiles = local.mk
-GLOBAL_CXXFLAGS += -g -Wall -std=c++20 -I ../src
+GLOBAL_CXXFLAGS += -g -Wall -std=c++2a -I ../src
-include Makefile.config
diff --git a/scripts/install-multi-user.sh b/scripts/install-multi-user.sh
index f149ea0d7..7c66538b0 100644
--- a/scripts/install-multi-user.sh
+++ b/scripts/install-multi-user.sh
@@ -136,7 +136,7 @@ EOF
cat <&2
exit 1
@@ -196,7 +198,7 @@ fi
# Install an SSL certificate bundle.
if [ -z "$NIX_SSL_CERT_FILE" ] || ! [ -f "$NIX_SSL_CERT_FILE" ]; then
"$nix/bin/nix-env" -i "$cacert"
- export NIX_SSL_CERT_FILE="$HOME/.nix-profile/etc/ssl/certs/ca-bundle.crt"
+ export NIX_SSL_CERT_FILE="$NIX_LINK/etc/ssl/certs/ca-bundle.crt"
fi
# Subscribe the user to the Nixpkgs channel and fetch it.
@@ -214,8 +216,8 @@ fi
added=
p=
-p_sh=$HOME/.nix-profile/etc/profile.d/nix.sh
-p_fish=$HOME/.nix-profile/etc/profile.d/nix.fish
+p_sh=$NIX_LINK/etc/profile.d/nix.sh
+p_fish=$NIX_LINK/etc/profile.d/nix.fish
if [ -z "$NIX_INSTALLER_NO_MODIFY_PROFILE" ]; then
# Make the shell source nix.sh during login.
for i in .bash_profile .bash_login .profile; do
diff --git a/scripts/nix-profile-daemon.sh.in b/scripts/nix-profile-daemon.sh.in
index 0a47571ac..235536c65 100644
--- a/scripts/nix-profile-daemon.sh.in
+++ b/scripts/nix-profile-daemon.sh.in
@@ -2,7 +2,33 @@
if [ -n "${__ETC_PROFILE_NIX_SOURCED:-}" ]; then return; fi
__ETC_PROFILE_NIX_SOURCED=1
-export NIX_PROFILES="@localstatedir@/nix/profiles/default $HOME/.nix-profile"
+NIX_LINK=$HOME/.nix-profile
+if [ -n "$XDG_STATE_HOME" ]; then
+ NIX_LINK_NEW="$XDG_STATE_HOME/nix/profile"
+else
+ NIX_LINK_NEW=$HOME/.local/state/nix/profile
+fi
+if ! [ -e "$NIX_LINK" ]; then
+ NIX_LINK="$NIX_LINK_NEW"
+else
+ if [ -t 2 ] && [ -e "$NIX_LINK_NEW" ]; then
+ warning="\033[1;35mwarning:\033[0m"
+ printf "$warning Both %s and legacy %s exist; using the latter.\n" "$NIX_LINK_NEW" "$NIX_LINK" 1>&2
+ if [ "$(realpath "$NIX_LINK")" = "$(realpath "$NIX_LINK_NEW")" ]; then
+ printf " Since the profiles match, you can safely delete either of them.\n" 1>&2
+ else
+ # This should be an exceptionally rare occasion: the only way to get it would be to
+ # 1. Update to newer Nix;
+ # 2. Remove .nix-profile;
+ # 3. Set the $NIX_LINK_NEW to something other than the default user profile;
+ # 4. Roll back to older Nix.
+ # If someone did all that, they can probably figure out how to migrate the profile.
+ printf "$warning Profiles do not match. You should manually migrate from %s to %s.\n" "$NIX_LINK" "$NIX_LINK_NEW" 1>&2
+ fi
+ fi
+fi
+
+export NIX_PROFILES="@localstatedir@/nix/profiles/default $NIX_LINK"
# Set $NIX_SSL_CERT_FILE so that Nixpkgs applications like curl work.
if [ -n "${NIX_SSL_CERT_FILE:-}" ]; then
@@ -34,4 +60,5 @@ else
unset -f check_nix_profiles
fi
-export PATH="$HOME/.nix-profile/bin:@localstatedir@/nix/profiles/default/bin:$PATH"
+export PATH="$NIX_LINK/bin:@localstatedir@/nix/profiles/default/bin:$PATH"
+unset NIX_LINK
diff --git a/scripts/nix-profile.sh.in b/scripts/nix-profile.sh.in
index 5636085d4..264d9a8e2 100644
--- a/scripts/nix-profile.sh.in
+++ b/scripts/nix-profile.sh.in
@@ -2,11 +2,35 @@ if [ -n "$HOME" ] && [ -n "$USER" ]; then
# Set up the per-user profile.
- NIX_LINK=$HOME/.nix-profile
+ NIX_LINK="$HOME/.nix-profile"
+ if [ -n "$XDG_STATE_HOME" ]; then
+ NIX_LINK_NEW="$XDG_STATE_HOME/nix/profile"
+ else
+ NIX_LINK_NEW="$HOME/.local/state/nix/profile"
+ fi
+ if ! [ -e "$NIX_LINK" ]; then
+ NIX_LINK="$NIX_LINK_NEW"
+ else
+ if [ -t 2 ] && [ -e "$NIX_LINK_NEW" ]; then
+ warning="\033[1;35mwarning:\033[0m"
+ printf "$warning Both %s and legacy %s exist; using the latter.\n" "$NIX_LINK_NEW" "$NIX_LINK" 1>&2
+ if [ "$(realpath "$NIX_LINK")" = "$(realpath "$NIX_LINK_NEW")" ]; then
+ printf " Since the profiles match, you can safely delete either of them.\n" 1>&2
+ else
+ # This should be an exceptionally rare occasion: the only way to get it would be to
+ # 1. Update to newer Nix;
+ # 2. Remove .nix-profile;
+ # 3. Set the $NIX_LINK_NEW to something other than the default user profile;
+ # 4. Roll back to older Nix.
+ # If someone did all that, they can probably figure out how to migrate the profile.
+ printf "$warning Profiles do not match. You should manually migrate from %s to %s.\n" "$NIX_LINK" "$NIX_LINK_NEW" 1>&2
+ fi
+ fi
+ fi
# Set up environment.
# This part should be kept in sync with nixpkgs:nixos/modules/programs/environment.nix
- export NIX_PROFILES="@localstatedir@/nix/profiles/default $HOME/.nix-profile"
+ export NIX_PROFILES="@localstatedir@/nix/profiles/default $NIX_LINK"
# Set $NIX_SSL_CERT_FILE so that Nixpkgs applications like curl work.
if [ -e /etc/ssl/certs/ca-certificates.crt ]; then # NixOS, Ubuntu, Debian, Gentoo, Arch
@@ -31,5 +55,5 @@ if [ -n "$HOME" ] && [ -n "$USER" ]; then
fi
export PATH="$NIX_LINK/bin:$PATH"
- unset NIX_LINK
+ unset NIX_LINK NIX_LINK_NEW
fi
diff --git a/src/libcmd/command.cc b/src/libcmd/command.cc
index 2b2350734..066143eb1 100644
--- a/src/libcmd/command.cc
+++ b/src/libcmd/command.cc
@@ -127,6 +127,16 @@ ref<EvalState> EvalCommand::getEvalState()
return ref<EvalState>(evalState);
}
+MixOperateOnOptions::MixOperateOnOptions()
+{
+ addFlag({
+ .longName = "derivation",
+ .description = "Operate on the [store derivation](../../glossary.md#gloss-store-derivation) rather than its outputs.",
+ .category = installablesCategory,
+ .handler = {&operateOn, OperateOn::Derivation},
+ });
+}
+
BuiltPathsCommand::BuiltPathsCommand(bool recursive)
: recursive(recursive)
{
diff --git a/src/libcmd/command.hh b/src/libcmd/command.hh
index fac70e6bd..a579edd6f 100644
--- a/src/libcmd/command.hh
+++ b/src/libcmd/command.hh
@@ -96,9 +96,6 @@ struct SourceExprCommand : virtual Args, MixFlakeOptions
std::optional<std::string> expr;
bool readOnlyMode = false;
- // FIXME: move this; not all commands (e.g. 'nix run') use it.
- OperateOn operateOn = OperateOn::Output;
-
SourceExprCommand(bool supportReadOnlyMode = false);
std::vector<std::shared_ptr<Installable>> parseInstallables(
@@ -153,8 +150,15 @@ private:
std::string _installable{"."};
};
+struct MixOperateOnOptions : virtual Args
+{
+ OperateOn operateOn = OperateOn::Output;
+
+ MixOperateOnOptions();
+};
+
/* A command that operates on zero or more store paths. */
-struct BuiltPathsCommand : public InstallablesCommand
+struct BuiltPathsCommand : InstallablesCommand, virtual MixOperateOnOptions
{
private:
diff --git a/src/libcmd/installable-derived-path.cc b/src/libcmd/installable-derived-path.cc
new file mode 100644
index 000000000..a9921b901
--- /dev/null
+++ b/src/libcmd/installable-derived-path.cc
@@ -0,0 +1,70 @@
+#include "installable-derived-path.hh"
+#include "derivations.hh"
+
+namespace nix {
+
+std::string InstallableDerivedPath::what() const
+{
+ return derivedPath.to_string(*store);
+}
+
+DerivedPathsWithInfo InstallableDerivedPath::toDerivedPaths()
+{
+ return {{.path = derivedPath, .info = {} }};
+}
+
+std::optional<StorePath> InstallableDerivedPath::getStorePath()
+{
+ return std::visit(overloaded {
+ [&](const DerivedPath::Built & bfd) {
+ return bfd.drvPath;
+ },
+ [&](const DerivedPath::Opaque & bo) {
+ return bo.path;
+ },
+ }, derivedPath.raw());
+}
+
+InstallableDerivedPath InstallableDerivedPath::parse(
+ ref<Store> store,
+ std::string_view prefix,
+ ExtendedOutputsSpec extendedOutputsSpec)
+{
+ auto derivedPath = std::visit(overloaded {
+ // If the user did not use ^, we treat the output more liberally.
+ [&](const ExtendedOutputsSpec::Default &) -> DerivedPath {
+ // First, we accept a symlink chain or an actual store path.
+ auto storePath = store->followLinksToStorePath(prefix);
+ // Second, we see if the store path ends in `.drv` to decide what sort
+ // of derived path they want.
+ //
+ // This handling predates the `^` syntax. The `^*` in
+ // `/nix/store/hash-foo.drv^*` unambiguously means "do the
+ // `DerivedPath::Built` case", so plain `/nix/store/hash-foo.drv` could
+ // also unambiguously mean "do the DerivedPath::Opaque` case".
+ //
+ // Issue #7261 tracks reconsidering this `.drv` dispatching.
+ return storePath.isDerivation()
+ ? (DerivedPath) DerivedPath::Built {
+ .drvPath = std::move(storePath),
+ .outputs = OutputsSpec::All {},
+ }
+ : (DerivedPath) DerivedPath::Opaque {
+ .path = std::move(storePath),
+ };
+ },
+ // If the user did use ^, we just do exactly what is written.
+ [&](const ExtendedOutputsSpec::Explicit & outputSpec) -> DerivedPath {
+ return DerivedPath::Built {
+ .drvPath = store->parseStorePath(prefix),
+ .outputs = outputSpec,
+ };
+ },
+ }, extendedOutputsSpec.raw());
+ return InstallableDerivedPath {
+ store,
+ std::move(derivedPath),
+ };
+}
+
+}
diff --git a/src/libcmd/installable-derived-path.hh b/src/libcmd/installable-derived-path.hh
new file mode 100644
index 000000000..042878b91
--- /dev/null
+++ b/src/libcmd/installable-derived-path.hh
@@ -0,0 +1,28 @@
+#pragma once
+
+#include "installables.hh"
+
+namespace nix {
+
+struct InstallableDerivedPath : Installable
+{
+ ref<Store> store;
+ DerivedPath derivedPath;
+
+ InstallableDerivedPath(ref<Store> store, DerivedPath && derivedPath)
+ : store(store), derivedPath(std::move(derivedPath))
+ { }
+
+ std::string what() const override;
+
+ DerivedPathsWithInfo toDerivedPaths() override;
+
+ std::optional<StorePath> getStorePath() override;
+
+ static InstallableDerivedPath parse(
+ ref<Store> store,
+ std::string_view prefix,
+ ExtendedOutputsSpec extendedOutputsSpec);
+};
+
+}
diff --git a/src/libcmd/installables.cc b/src/libcmd/installables.cc
index 27c677b7b..50d456aa6 100644
--- a/src/libcmd/installables.cc
+++ b/src/libcmd/installables.cc
@@ -1,5 +1,6 @@
#include "globals.hh"
#include "installables.hh"
+#include "installable-derived-path.hh"
#include "outputs-spec.hh"
#include "util.hh"
#include "command.hh"
@@ -167,13 +168,6 @@ SourceExprCommand::SourceExprCommand(bool supportReadOnlyMode)
.handler = {&expr}
});
- addFlag({
- .longName = "derivation",
- .description = "Operate on the [store derivation](../../glossary.md#gloss-store-derivation) rather than its outputs.",
- .category = installablesCategory,
- .handler = {&operateOn, OperateOn::Derivation},
- });
-
if (supportReadOnlyMode) {
addFlag({
.longName = "read-only",
@@ -397,38 +391,6 @@ static StorePath getDeriver(
return *derivers.begin();
}
-struct InstallableStorePath : Installable
-{
- ref<Store> store;
- DerivedPath req;
-
- InstallableStorePath(ref<Store> store, DerivedPath && req)
- : store(store), req(std::move(req))
- { }
-
- std::string what() const override
- {
- return req.to_string(*store);
- }
-
- DerivedPathsWithInfo toDerivedPaths() override
- {
- return {{req}};
- }
-
- std::optional<StorePath> getStorePath() override
- {
- return std::visit(overloaded {
- [&](const DerivedPath::Built & bfd) {
- return bfd.drvPath;
- },
- [&](const DerivedPath::Opaque & bo) {
- return bo.path;
- },
- }, req.raw());
- }
-};
-
struct InstallableAttrPath : InstallableValue
{
SourceExprCommand & cmd;
@@ -792,41 +754,10 @@ std::vector<std::shared_ptr<Installable>> SourceExprCommand::parseInstallables(
auto prefix = std::move(prefix_);
auto extendedOutputsSpec = std::move(extendedOutputsSpec_);
- auto found = prefix.find('/');
- if (found != std::string::npos) {
+ if (prefix.find('/') != std::string::npos) {
try {
- auto derivedPath = std::visit(overloaded {
- // If the user did not use ^, we treat the output more liberally.
- [&](const ExtendedOutputsSpec::Default &) -> DerivedPath {
- // First, we accept a symlink chain or an actual store path.
- auto storePath = store->followLinksToStorePath(prefix);
- // Second, we see if the store path ends in `.drv` to decide what sort
- // of derived path they want.
- //
- // This handling predates the `^` syntax. The `^*` in
- // `/nix/store/hash-foo.drv^*` unambiguously means "do the
- // `DerivedPath::Built` case", so plain `/nix/store/hash-foo.drv` could
- // also unambiguously mean "do the DerivedPath::Opaque` case".
- //
- // Issue #7261 tracks reconsidering this `.drv` dispatching.
- return storePath.isDerivation()
- ? (DerivedPath) DerivedPath::Built {
- .drvPath = std::move(storePath),
- .outputs = OutputsSpec::All {},
- }
- : (DerivedPath) DerivedPath::Opaque {
- .path = std::move(storePath),
- };
- },
- // If the user did use ^, we just do exactly what is written.
- [&](const ExtendedOutputsSpec::Explicit & outputSpec) -> DerivedPath {
- return DerivedPath::Built {
- .drvPath = store->parseStorePath(prefix),
- .outputs = outputSpec,
- };
- },
- }, extendedOutputsSpec.raw());
- result.push_back(std::make_shared<InstallableStorePath>(store, std::move(derivedPath)));
+ result.push_back(std::make_shared<InstallableDerivedPath>(
+ InstallableDerivedPath::parse(store, prefix, extendedOutputsSpec)));
continue;
} catch (BadStorePath &) {
} catch (...) {
diff --git a/src/libcmd/nix-cmd.pc.in b/src/libcmd/nix-cmd.pc.in
index a21d93f1d..39575f222 100644
--- a/src/libcmd/nix-cmd.pc.in
+++ b/src/libcmd/nix-cmd.pc.in
@@ -6,4 +6,4 @@ Name: Nix
Description: Nix Package Manager
Version: @PACKAGE_VERSION@
Libs: -L${libdir} -lnixcmd
-Cflags: -I${includedir}/nix -std=c++20
+Cflags: -I${includedir}/nix -std=c++2a
diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc
index 8e359be60..aa164a9f5 100644
--- a/src/libexpr/eval.cc
+++ b/src/libexpr/eval.cc
@@ -2491,7 +2491,7 @@ Strings EvalSettings::getDefaultNixPath()
res.push_back(s ? *s + "=" + p : p);
};
- add(getHome() + "/.nix-defexpr/channels");
+ add(settings.useXDGBaseDirectories ? getStateDir() + "/nix/defexpr/channels" : getHome() + "/.nix-defexpr/channels");
add(settings.nixStateDir + "/profiles/per-user/root/channels/nixpkgs", "nixpkgs");
add(settings.nixStateDir + "/profiles/per-user/root/channels");
diff --git a/src/libexpr/nix-expr.pc.in b/src/libexpr/nix-expr.pc.in
index 95d452ca8..60ffb5dba 100644
--- a/src/libexpr/nix-expr.pc.in
+++ b/src/libexpr/nix-expr.pc.in
@@ -7,4 +7,4 @@ Description: Nix Package Manager
Version: @PACKAGE_VERSION@
Requires: nix-store bdw-gc
Libs: -L${libdir} -lnixexpr
-Cflags: -I${includedir}/nix -std=c++20
+Cflags: -I${includedir}/nix -std=c++2a
diff --git a/src/libexpr/nixexpr.hh b/src/libexpr/nixexpr.hh
index 387fa4927..793782ad1 100644
--- a/src/libexpr/nixexpr.hh
+++ b/src/libexpr/nixexpr.hh
@@ -185,7 +185,7 @@ struct ExprString : Expr
{
std::string s;
Value v;
- ExprString(std::string s) : s(std::move(s)) { v.mkString(this->s.data()); };
+ ExprString(std::string &&s) : s(std::move(s)) { v.mkString(this->s.data()); };
Value * maybeThunk(EvalState & state, Env & env) override;
COMMON_METHODS
};
@@ -236,7 +236,7 @@ struct ExprSelect : Expr
PosIdx pos;
Expr * e, * def;
AttrPath attrPath;
- ExprSelect(const PosIdx & pos, Expr * e, const AttrPath & attrPath, Expr * def) : pos(pos), e(e), def(def), attrPath(attrPath) { };
+ ExprSelect(const PosIdx & pos, Expr * e, const AttrPath && attrPath, Expr * def) : pos(pos), e(e), def(def), attrPath(std::move(attrPath)) { };
ExprSelect(const PosIdx & pos, Expr * e, Symbol name) : pos(pos), e(e), def(0) { attrPath.push_back(AttrName(name)); };
PosIdx getPos() const override { return pos; }
COMMON_METHODS
@@ -246,7 +246,7 @@ struct ExprOpHasAttr : Expr
{
Expr * e;
AttrPath attrPath;
- ExprOpHasAttr(Expr * e, const AttrPath & attrPath) : e(e), attrPath(attrPath) { };
+ ExprOpHasAttr(Expr * e, const AttrPath && attrPath) : e(e), attrPath(std::move(attrPath)) { };
PosIdx getPos() const override { return e->getPos(); }
COMMON_METHODS
};
diff --git a/src/libexpr/parser.y b/src/libexpr/parser.y
index e7e819576..faa8a2e60 100644
--- a/src/libexpr/parser.y
+++ b/src/libexpr/parser.y
@@ -90,7 +90,7 @@ static void dupAttr(const EvalState & state, Symbol attr, const PosIdx pos, cons
}
-static void addAttr(ExprAttrs * attrs, AttrPath & attrPath,
+static void addAttr(ExprAttrs * attrs, AttrPath && attrPath,
Expr * e, const PosIdx pos, const nix::EvalState & state)
{
AttrPath::iterator i;
@@ -188,7 +188,7 @@ static Formals * toFormals(ParseData & data, ParserFormals * formals,
static Expr * stripIndentation(const PosIdx pos, SymbolTable & symbols,
- std::vector<std::pair<PosIdx, std::variant<Expr *, StringToken>>> & es)
+ std::vector<std::pair<PosIdx, std::variant<Expr *, StringToken>>> && es)
{
if (es.empty()) return new ExprString("");
@@ -268,7 +268,7 @@ static Expr * stripIndentation(const PosIdx pos, SymbolTable & symbols,
s2 = std::string(s2, 0, p + 1);
}
- es2->emplace_back(i->first, new ExprString(s2));
+ es2->emplace_back(i->first, new ExprString(std::move(s2)));
};
for (; i != es.end(); ++i, --n) {
std::visit(overloaded { trimExpr, trimString }, i->second);
@@ -413,7 +413,7 @@ expr_op
| expr_op OR expr_op { $$ = new ExprOpOr(makeCurPos(@2, data), $1, $3); }
| expr_op IMPL expr_op { $$ = new ExprOpImpl(makeCurPos(@2, data), $1, $3); }
| expr_op UPDATE expr_op { $$ = new ExprOpUpdate(makeCurPos(@2, data), $1, $3); }
- | expr_op '?' attrpath { $$ = new ExprOpHasAttr($1, *$3); }
+ | expr_op '?' attrpath { $$ = new ExprOpHasAttr($1, std::move(*$3)); delete $3; }
| expr_op '+' expr_op
{ $$ = new ExprConcatStrings(makeCurPos(@2, data), false, new std::vector<std::pair<PosIdx, Expr *> >({{makeCurPos(@1, data), $1}, {makeCurPos(@3, data), $3}})); }
| expr_op '-' expr_op { $$ = new ExprCall(makeCurPos(@2, data), new ExprVar(data->symbols.create("__sub")), {$1, $3}); }
@@ -436,14 +436,14 @@ expr_app
expr_select
: expr_simple '.' attrpath
- { $$ = new ExprSelect(CUR_POS, $1, *$3, 0); }
+ { $$ = new ExprSelect(CUR_POS, $1, std::move(*$3), nullptr); delete $3; }
| expr_simple '.' attrpath OR_KW expr_select
- { $$ = new ExprSelect(CUR_POS, $1, *$3, $5); }
+ { $$ = new ExprSelect(CUR_POS, $1, std::move(*$3), $5); delete $3; }
| /* Backwards compatibility: because Nixpkgs has a rarely used
function named ‘or’, allow stuff like ‘map or [...]’. */
expr_simple OR_KW
{ $$ = new ExprCall(CUR_POS, $1, {new ExprVar(CUR_POS, data->symbols.create("or"))}); }
- | expr_simple { $$ = $1; }
+ | expr_simple
;
expr_simple
@@ -458,7 +458,8 @@ expr_simple
| FLOAT { $$ = new ExprFloat($1); }
| '"' string_parts '"' { $$ = $2; }
| IND_STRING_OPEN ind_string_parts IND_STRING_CLOSE {
- $$ = stripIndentation(CUR_POS, data->symbols, *$2);
+ $$ = stripIndentation(CUR_POS, data->symbols, std::move(*$2));
+ delete $2;
}
| path_start PATH_END { $$ = $1.e; }
| path_start string_parts_interpolated PATH_END {
@@ -472,7 +473,7 @@ expr_simple
$$ = new ExprCall(CUR_POS,
new ExprVar(data->symbols.create("__findFile")),
{new ExprVar(data->symbols.create("__nixPath")),
- new ExprString(path)});
+ new ExprString(std::move(path))});
}
| URI {
static bool noURLLiterals = settings.isExperimentalFeatureEnabled(Xp::NoUrlLiterals);
@@ -546,7 +547,7 @@ ind_string_parts
;
binds
- : binds attrpath '=' expr ';' { $$ = $1; addAttr($$, *$2, $4, makeCurPos(@2, data), data->state); }
+ : binds attrpath '=' expr ';' { $$ = $1; addAttr($$, std::move(*$2), $4, makeCurPos(@2, data), data->state); delete $2; }
| binds INHERIT attrs ';'
{ $$ = $1;
for (auto & i : *$3) {
@@ -555,6 +556,7 @@ binds
auto pos = makeCurPos(@3, data);
$$->attrs.emplace(i.symbol, ExprAttrs::AttrDef(new ExprVar(CUR_POS, i.symbol), pos, true));
}
+ delete $3;
}
| binds INHERIT '(' expr ')' attrs ';'
{ $$ = $1;
@@ -564,6 +566,7 @@ binds
dupAttr(data->state, i.symbol, makeCurPos(@6, data), $$->attrs[i.symbol].pos);
$$->attrs.emplace(i.symbol, ExprAttrs::AttrDef(new ExprSelect(CUR_POS, $4, i.symbol), makeCurPos(@6, data)));
}
+ delete $6;
}
| { $$ = new ExprAttrs(makeCurPos(@0, data)); }
;
@@ -609,7 +612,7 @@ attrpath
;
attr
- : ID { $$ = $1; }
+ : ID
| OR_KW { $$ = {"or", 2}; }
;
@@ -625,9 +628,9 @@ expr_list
formals
: formal ',' formals
- { $$ = $3; $$->formals.push_back(*$1); }
+ { $$ = $3; $$->formals.emplace_back(*$1); delete $1; }
| formal
- { $$ = new ParserFormals; $$->formals.push_back(*$1); $$->ellipsis = false; }
+ { $$ = new ParserFormals; $$->formals.emplace_back(*$1); $$->ellipsis = false; delete $1; }
|
{ $$ = new ParserFormals; $$->ellipsis = false; }
| ELLIPSIS
diff --git a/src/libfetchers/git.cc b/src/libfetchers/git.cc
index 510595f7e..6c1992799 100644
--- a/src/libfetchers/git.cc
+++ b/src/libfetchers/git.cc
@@ -713,15 +713,42 @@ struct GitInputScheme : InputScheme
AutoDelete delTmpGitDir(tmpGitDir, true);
runProgram("git", true, { "-c", "init.defaultBranch=" + gitInitialBranch, "init", tmpDir, "--separate-git-dir", tmpGitDir });
- // TODO: repoDir might lack the ref (it only checks if rev
- // exists, see FIXME above) so use a big hammer and fetch
- // everything to ensure we get the rev.
- runProgram("git", true, { "-C", tmpDir, "fetch", "--quiet", "--force",
- "--update-head-ok", "--", repoDir, "refs/*:refs/*" });
+
+ {
+ // TODO: repoDir might lack the ref (it only checks if rev
+ // exists, see FIXME above) so use a big hammer and fetch
+ // everything to ensure we get the rev.
+ Activity act(*logger, lvlTalkative, actUnknown, fmt("making temporary clone of '%s'", repoDir));
+ runProgram("git", true, { "-C", tmpDir, "fetch", "--quiet", "--force",
+ "--update-head-ok", "--", repoDir, "refs/*:refs/*" });
+ }
runProgram("git", true, { "-C", tmpDir, "checkout", "--quiet", rev.gitRev() });
- runProgram("git", true, { "-C", tmpDir, "remote", "add", "origin", repoInfo.url });
- runProgram("git", true, { "-C", tmpDir, "submodule", "--quiet", "update", "--init", "--recursive" });
+
+ /* Ensure that we use the correct origin for fetching
+ submodules. This matters for submodules with relative
+ URLs. */
+ if (repoInfo.isLocal) {
+ writeFile(tmpGitDir + "/config", readFile(repoDir + "/" + repoInfo.gitDir + "/config"));
+
+ /* Restore the config.bare setting we may have just
+ copied erroneously from the user's repo. */
+ runProgram("git", true, { "-C", tmpDir, "config", "core.bare", "false" });
+ } else
+ runProgram("git", true, { "-C", tmpDir, "config", "remote.origin.url", repoInfo.url });
+
+ /* As an optimisation, copy the modules directory of the
+ source repo if it exists. */
+ auto modulesPath = repoDir + "/" + repoInfo.gitDir + "/modules";
+ if (pathExists(modulesPath)) {
+ Activity act(*logger, lvlTalkative, actUnknown, fmt("copying submodules of '%s'", repoInfo.url));
+ runProgram("cp", true, { "-R", "--", modulesPath, tmpGitDir + "/modules" });
+ }
+
+ {
+ Activity act(*logger, lvlTalkative, actUnknown, fmt("fetching submodules of '%s'", repoInfo.url));
+ runProgram("git", true, { "-C", tmpDir, "submodule", "--quiet", "update", "--init", "--recursive" });
+ }
filter = isNotDotGitDirectory;
} else {
diff --git a/src/libmain/nix-main.pc.in b/src/libmain/nix-main.pc.in
index b46ce1990..fb3ead6fa 100644
--- a/src/libmain/nix-main.pc.in
+++ b/src/libmain/nix-main.pc.in
@@ -6,4 +6,4 @@ Name: Nix
Description: Nix Package Manager
Version: @PACKAGE_VERSION@
Libs: -L${libdir} -lnixmain
-Cflags: -I${includedir}/nix -std=c++20
+Cflags: -I${includedir}/nix -std=c++2a
diff --git a/src/libstore/build/local-derivation-goal.cc b/src/libstore/build/local-derivation-goal.cc
index e1cc504f8..7b125f5d2 100644
--- a/src/libstore/build/local-derivation-goal.cc
+++ b/src/libstore/build/local-derivation-goal.cc
@@ -209,7 +209,7 @@ void LocalDerivationGoal::tryLocalBuild()
#if __linux__
if (useChroot) {
- if (!mountNamespacesSupported() || !pidNamespacesSupported()) {
+ if (!mountAndPidNamespacesSupported()) {
if (!settings.sandboxFallback)
throw Error("this system does not support the kernel namespaces that are required for sandboxing; use '--no-sandbox' to disable sandboxing");
debug("auto-disabling sandboxing because the prerequisite namespaces are not available");
@@ -385,12 +385,6 @@ void LocalDerivationGoal::cleanupPostOutputsRegisteredModeNonCheck()
}
-int childEntry(void * arg)
-{
- ((LocalDerivationGoal *) arg)->runChild();
- return 1;
-}
-
#if __linux__
static void linkOrCopy(const Path & from, const Path & to)
{
@@ -676,7 +670,8 @@ void LocalDerivationGoal::startBuilder()
nobody account. The latter is kind of a hack to support
Samba-in-QEMU. */
createDirs(chrootRootDir + "/etc");
- chownToBuilder(chrootRootDir + "/etc");
+ if (parsedDrv->useUidRange())
+ chownToBuilder(chrootRootDir + "/etc");
if (parsedDrv->useUidRange() && (!buildUser || buildUser->getUIDCount() < 65536))
throw Error("feature 'uid-range' requires the setting '%s' to be enabled", settings.autoAllocateUids.name);
@@ -916,21 +911,15 @@ void LocalDerivationGoal::startBuilder()
if (getuid() == 0 && setgroups(0, 0) == -1)
throw SysError("setgroups failed");
- size_t stackSize = 1 * 1024 * 1024;
- char * stack = (char *) mmap(0, stackSize,
- PROT_WRITE | PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
- if (stack == MAP_FAILED) throw SysError("allocating stack");
-
- int flags = CLONE_NEWPID | CLONE_NEWNS | CLONE_NEWIPC | CLONE_NEWUTS | CLONE_PARENT | SIGCHLD;
+ ProcessOptions options;
+ options.cloneFlags = CLONE_NEWPID | CLONE_NEWNS | CLONE_NEWIPC | CLONE_NEWUTS | CLONE_PARENT | SIGCHLD;
if (privateNetwork)
- flags |= CLONE_NEWNET;
+ options.cloneFlags |= CLONE_NEWNET;
if (usingUserNamespace)
- flags |= CLONE_NEWUSER;
+ options.cloneFlags |= CLONE_NEWUSER;
- pid_t child = clone(childEntry, stack + stackSize, flags, this);
+ pid_t child = startProcess([&]() { runChild(); }, options);
- if (child == -1)
- throw SysError("creating sandboxed builder process using clone()");
writeFull(builderOut.writeSide.get(),
fmt("%d %d\n", usingUserNamespace, child));
_exit(0);
@@ -982,6 +971,10 @@ void LocalDerivationGoal::startBuilder()
"nobody:x:65534:65534:Nobody:/:/noshell\n",
sandboxUid(), sandboxGid(), settings.sandboxBuildDir));
+ /* Make /etc unwritable */
+ if (!parsedDrv->useUidRange())
+ chmod_(chrootRootDir + "/etc", 0555);
+
/* Save the mount- and user namespace of the child. We have to do this
*before* the child does a chroot. */
sandboxMountNamespace = open(fmt("/proc/%d/ns/mnt", (pid_t) pid).c_str(), O_RDONLY);
diff --git a/src/libstore/filetransfer.cc b/src/libstore/filetransfer.cc
index 756bd4423..b25089ec3 100644
--- a/src/libstore/filetransfer.cc
+++ b/src/libstore/filetransfer.cc
@@ -101,6 +101,7 @@ struct curlFileTransfer : public FileTransfer
this->result.data.append(data);
})
{
+ requestHeaders = curl_slist_append(requestHeaders, "Accept-Encoding: zstd, br, gzip, deflate, bzip2, xz");
if (!request.expectedETag.empty())
requestHeaders = curl_slist_append(requestHeaders, ("If-None-Match: " + request.expectedETag).c_str());
if (!request.mimeType.empty())
diff --git a/src/libstore/globals.hh b/src/libstore/globals.hh
index 42981219d..0a4912f67 100644
--- a/src/libstore/globals.hh
+++ b/src/libstore/globals.hh
@@ -945,6 +945,27 @@ public:
resolves to a different location from that of the build machine. You
can enable this setting if you are sure you're not going to do that.
)"};
+
+ Setting<bool> useXDGBaseDirectories{
+ this, false, "use-xdg-base-directories",
+ R"(
+ If set to `true`, Nix will conform to the [XDG Base Directory Specification] for files in `$HOME`.
+ The environment variables used to implement this are documented in the [Environment Variables section](@docroot@/installation/env-variables.md).
+
+ [XDG Base Directory Specification]: https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html
+
+ > **Warning**
+ > This changes the location of some well-known symlinks that Nix creates, which might break tools that rely on the old, non-XDG-conformant locations.
+
+ In particular, the following locations change:
+
+ | Old | New |
+ |-------------------|--------------------------------|
+ | `~/.nix-profile` | `$XDG_STATE_HOME/nix/profile` |
+ | `~/.nix-defexpr` | `$XDG_STATE_HOME/nix/defexpr` |
+ | `~/.nix-channels` | `$XDG_STATE_HOME/nix/channels` |
+ )"
+ };
};
diff --git a/src/libstore/http-binary-cache-store.cc b/src/libstore/http-binary-cache-store.cc
index 73bcd6e81..1479822a9 100644
--- a/src/libstore/http-binary-cache-store.cc
+++ b/src/libstore/http-binary-cache-store.cc
@@ -56,7 +56,7 @@ public:
void init() override
{
// FIXME: do this lazily?
- if (auto cacheInfo = diskCache->cacheExists(cacheUri)) {
+ if (auto cacheInfo = diskCache->upToDateCacheExists(cacheUri)) {
wantMassQuery.setDefault(cacheInfo->wantMassQuery);
priority.setDefault(cacheInfo->priority);
} else {
diff --git a/src/libstore/nar-info-disk-cache.cc b/src/libstore/nar-info-disk-cache.cc
index 3e0689534..2645f468b 100644
--- a/src/libstore/nar-info-disk-cache.cc
+++ b/src/libstore/nar-info-disk-cache.cc
@@ -84,11 +84,10 @@ public:
Sync<State> _state;
- NarInfoDiskCacheImpl()
+ NarInfoDiskCacheImpl(Path dbPath = getCacheDir() + "/nix/binary-cache-v6.sqlite")
{
auto state(_state.lock());
- Path dbPath = getCacheDir() + "/nix/binary-cache-v6.sqlite";
createDirs(dirOf(dbPath));
state->db = SQLite(dbPath);
@@ -98,7 +97,7 @@ public:
state->db.exec(schema);
state->insertCache.create(state->db,
- "insert or replace into BinaryCaches(url, timestamp, storeDir, wantMassQuery, priority) values (?, ?, ?, ?, ?)");
+ "insert into BinaryCaches(url, timestamp, storeDir, wantMassQuery, priority) values (?1, ?2, ?3, ?4, ?5) on conflict (url) do update set timestamp = ?2, storeDir = ?3, wantMassQuery = ?4, priority = ?5 returning id;");
state->queryCache.create(state->db,
"select id, storeDir, wantMassQuery, priority from BinaryCaches where url = ? and timestamp > ?");
@@ -166,6 +165,8 @@ public:
return i->second;
}
+private:
+
std::optional queryCacheRaw(State & state, const std::string & uri)
{
auto i = state.caches.find(uri);
@@ -173,15 +174,21 @@ public:
auto queryCache(state.queryCache.use()(uri)(time(0) - cacheInfoTtl));
if (!queryCache.next())
return std::nullopt;
- state.caches.emplace(uri,
- Cache{(int) queryCache.getInt(0), queryCache.getStr(1), queryCache.getInt(2) != 0, (int) queryCache.getInt(3)});
+ auto cache = Cache {
+ .id = (int) queryCache.getInt(0),
+ .storeDir = queryCache.getStr(1),
+ .wantMassQuery = queryCache.getInt(2) != 0,
+ .priority = (int) queryCache.getInt(3),
+ };
+ state.caches.emplace(uri, cache);
}
return getCache(state, uri);
}
- void createCache(const std::string & uri, const Path & storeDir, bool wantMassQuery, int priority) override
+public:
+ int createCache(const std::string & uri, const Path & storeDir, bool wantMassQuery, int priority) override
{
- retrySQLite<void>([&]() {
+ return retrySQLite<int>([&]() {
auto state(_state.lock());
SQLiteTxn txn(state->db);
@@ -190,17 +197,29 @@ public:
auto cache(queryCacheRaw(*state, uri));
if (cache)
- return;
+ return cache->id;
- state->insertCache.use()(uri)(time(0))(storeDir)(wantMassQuery)(priority).exec();
- assert(sqlite3_changes(state->db) == 1);
- state->caches[uri] = Cache{(int) sqlite3_last_insert_rowid(state->db), storeDir, wantMassQuery, priority};
+ Cache ret {
+ .id = -1, // set below
+ .storeDir = storeDir,
+ .wantMassQuery = wantMassQuery,
+ .priority = priority,
+ };
+
+ {
+ auto r(state->insertCache.use()(uri)(time(0))(storeDir)(wantMassQuery)(priority));
+ assert(r.next());
+ ret.id = (int) r.getInt(0);
+ }
+
+ state->caches[uri] = ret;
txn.commit();
+ return ret.id;
});
}
- std::optional<CacheInfo> cacheExists(const std::string & uri) override
+ std::optional<CacheInfo> upToDateCacheExists(const std::string & uri) override
{
return retrySQLite<std::optional<CacheInfo>>([&]() -> std::optional<CacheInfo> {
auto state(_state.lock());
@@ -208,6 +227,7 @@ public:
if (!cache)
return std::nullopt;
return CacheInfo {
+ .id = cache->id,
.wantMassQuery = cache->wantMassQuery,
.priority = cache->priority
};
@@ -371,4 +391,9 @@ ref<NarInfoDiskCache> getNarInfoDiskCache()
return cache;
}
+ref<NarInfoDiskCache> getTestNarInfoDiskCache(Path dbPath)
+{
+ return make_ref<NarInfoDiskCacheImpl>(dbPath);
+}
+
}
diff --git a/src/libstore/nar-info-disk-cache.hh b/src/libstore/nar-info-disk-cache.hh
index 2dcaa76a4..4877f56d8 100644
--- a/src/libstore/nar-info-disk-cache.hh
+++ b/src/libstore/nar-info-disk-cache.hh
@@ -13,16 +13,17 @@ public:
virtual ~NarInfoDiskCache() { }
- virtual void createCache(const std::string & uri, const Path & storeDir,
+ virtual int createCache(const std::string & uri, const Path & storeDir,
bool wantMassQuery, int priority) = 0;
struct CacheInfo
{
+ int id;
bool wantMassQuery;
int priority;
};
- virtual std::optional<CacheInfo> cacheExists(const std::string & uri) = 0;
+ virtual std::optional<CacheInfo> upToDateCacheExists(const std::string & uri) = 0;
virtual std::pair<Outcome, std::shared_ptr<NarInfo>> lookupNarInfo(
const std::string & uri, const std::string & hashPart) = 0;
@@ -45,4 +46,6 @@ public:
multiple threads. */
ref<NarInfoDiskCache> getNarInfoDiskCache();
+ref<NarInfoDiskCache> getTestNarInfoDiskCache(Path dbPath);
+
}
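
For orientation, a hypothetical call site for the changed NarInfoDiskCache interface (the database path and URL below are invented; the real callers are the binary-cache stores above and the unit test added further down):

```cpp
#include <cassert>
#include "nar-info-disk-cache.hh"

using namespace nix;

void example()
{
    // getTestNarInfoDiskCache points the cache at a throwaway database.
    auto cache = getTestNarInfoDiskCache("/tmp/example-binary-cache.sqlite");

    // createCache now returns the row id, and registering the same URL again
    // keeps that id (upsert instead of "insert or replace").
    int id = cache->createCache("https://cache.example.org", "/nix/store",
        /* wantMassQuery */ true, /* priority */ 40);

    // cacheExists() was renamed to upToDateCacheExists(): it only returns
    // metadata for entries newer than the TTL.
    if (auto info = cache->upToDateCacheExists("https://cache.example.org"))
        assert(info->id == id);
}
```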
diff --git a/src/libstore/nix-store.pc.in b/src/libstore/nix-store.pc.in
index 385169a13..dc42d0bca 100644
--- a/src/libstore/nix-store.pc.in
+++ b/src/libstore/nix-store.pc.in
@@ -6,4 +6,4 @@ Name: Nix
Description: Nix Package Manager
Version: @PACKAGE_VERSION@
Libs: -L${libdir} -lnixstore -lnixutil
-Cflags: -I${includedir}/nix -std=c++20
+Cflags: -I${includedir}/nix -std=c++2a
diff --git a/src/libstore/profiles.cc b/src/libstore/profiles.cc
index b202351ce..c551c5f3e 100644
--- a/src/libstore/profiles.cc
+++ b/src/libstore/profiles.cc
@@ -282,7 +282,7 @@ std::string optimisticLockProfile(const Path & profile)
Path profilesDir()
{
- auto profileRoot = getDataDir() + "/nix/profiles";
+ auto profileRoot = createNixStateDir() + "/profiles";
createDirs(profileRoot);
return profileRoot;
}
@@ -290,7 +290,7 @@ Path profilesDir()
Path getDefaultProfile()
{
- Path profileLink = getHome() + "/.nix-profile";
+ Path profileLink = settings.useXDGBaseDirectories ? createNixStateDir() + "/profile" : getHome() + "/.nix-profile";
try {
auto profile =
getuid() == 0
diff --git a/src/libstore/profiles.hh b/src/libstore/profiles.hh
index 73667a798..fbf95b850 100644
--- a/src/libstore/profiles.hh
+++ b/src/libstore/profiles.hh
@@ -72,8 +72,9 @@ std::string optimisticLockProfile(const Path & profile);
profiles. */
Path profilesDir();
-/* Resolve ~/.nix-profile. If ~/.nix-profile doesn't exist yet, create
- it. */
+/* Resolve the default profile (~/.nix-profile by default, or $XDG_STATE_HOME/
+ nix/profile if XDG Base Directory support is enabled), creating it if it
+ doesn't exist yet. */
Path getDefaultProfile();
}
diff --git a/src/libstore/s3-binary-cache-store.cc b/src/libstore/s3-binary-cache-store.cc
index 844553ad3..8d76eee99 100644
--- a/src/libstore/s3-binary-cache-store.cc
+++ b/src/libstore/s3-binary-cache-store.cc
@@ -238,7 +238,7 @@ struct S3BinaryCacheStoreImpl : virtual S3BinaryCacheStoreConfig, public virtual
void init() override
{
- if (auto cacheInfo = diskCache->cacheExists(getUri())) {
+ if (auto cacheInfo = diskCache->upToDateCacheExists(getUri())) {
wantMassQuery.setDefault(cacheInfo->wantMassQuery);
priority.setDefault(cacheInfo->priority);
} else {
diff --git a/src/libstore/sqlite.cc b/src/libstore/sqlite.cc
index 353dff9fa..871f2f3be 100644
--- a/src/libstore/sqlite.cc
+++ b/src/libstore/sqlite.cc
@@ -41,6 +41,15 @@ SQLiteError::SQLiteError(const char *path, const char *errMsg, int errNo, int ex
throw SQLiteError(path, errMsg, err, exterr, offset, std::move(hf));
}
+static void traceSQL(void * x, const char * sql)
+{
+ // wacky delimiters:
+ // so that we're quite unambiguous without escaping anything
+ // notice instead of trace:
+ // so that this can be enabled without getting the firehose in our face.
+ notice("SQL<[%1%]>", sql);
+};
+
SQLite::SQLite(const Path & path, bool create)
{
// useSQLiteWAL also indicates what virtual file system we need. Using
@@ -58,6 +67,11 @@ SQLite::SQLite(const Path & path, bool create)
if (sqlite3_busy_timeout(db, 60 * 60 * 1000) != SQLITE_OK)
SQLiteError::throw_(db, "setting timeout");
+ if (getEnv("NIX_DEBUG_SQLITE_TRACES") == "1") {
+ // To debug sqlite statements; trace all of them
+ sqlite3_trace(db, &traceSQL, nullptr);
+ }
+
exec("pragma foreign_keys = 1");
}
diff --git a/src/libstore/tests/nar-info-disk-cache.cc b/src/libstore/tests/nar-info-disk-cache.cc
new file mode 100644
index 000000000..b4bdb8329
--- /dev/null
+++ b/src/libstore/tests/nar-info-disk-cache.cc
@@ -0,0 +1,123 @@
+#include "nar-info-disk-cache.hh"
+
+#include <gtest/gtest.h>
+#include <rapidcheck/gtest.h>
+#include "sqlite.hh"
+#include <sqlite3.h>
+
+
+namespace nix {
+
+TEST(NarInfoDiskCacheImpl, create_and_read) {
+ // This is a large single test to avoid some setup overhead.
+
+ int prio = 12345;
+ bool wantMassQuery = true;
+
+ Path tmpDir = createTempDir();
+ AutoDelete delTmpDir(tmpDir);
+ Path dbPath(tmpDir + "/test-narinfo-disk-cache.sqlite");
+
+ int savedId;
+ int barId;
+ SQLite db;
+ SQLiteStmt getIds;
+
+ {
+ auto cache = getTestNarInfoDiskCache(dbPath);
+
+ // Set up "background noise" and check that different caches receive different ids
+ {
+ auto bc1 = cache->createCache("https://bar", "/nix/storedir", wantMassQuery, prio);
+ auto bc2 = cache->createCache("https://xyz", "/nix/storedir", false, 12);
+ ASSERT_NE(bc1, bc2);
+ barId = bc1;
+ }
+
+ // Check that the fields are saved and returned correctly. This does not test
+ // the select statement yet, because of in-memory caching.
+ savedId = cache->createCache("http://foo", "/nix/storedir", wantMassQuery, prio);;
+ {
+ auto r = cache->upToDateCacheExists("http://foo");
+ ASSERT_TRUE(r);
+ ASSERT_EQ(r->priority, prio);
+ ASSERT_EQ(r->wantMassQuery, wantMassQuery);
+ ASSERT_EQ(savedId, r->id);
+ }
+
+ // We're going to pay special attention to the id field because we had a bug
+ // that changed it.
+ db = SQLite(dbPath);
+ getIds.create(db, "select id from BinaryCaches where url = 'http://foo'");
+
+ {
+ auto q(getIds.use());
+ ASSERT_TRUE(q.next());
+ ASSERT_EQ(savedId, q.getInt(0));
+ ASSERT_FALSE(q.next());
+ }
+
+ // Pretend that the caches are older, but keep one up to date, as "background noise"
+ db.exec("update BinaryCaches set timestamp = timestamp - 1 - 7 * 24 * 3600 where url <> 'https://xyz';");
+
+ // This shows that the in-memory cache works
+ {
+ auto r = cache->upToDateCacheExists("http://foo");
+ ASSERT_TRUE(r);
+ ASSERT_EQ(r->priority, prio);
+ ASSERT_EQ(r->wantMassQuery, wantMassQuery);
+ }
+ }
+
+ {
+ // We can't clear the in-memory cache, so we use a new cache object. This is
+ // more realistic anyway.
+ auto cache2 = getTestNarInfoDiskCache(dbPath);
+
+ {
+ auto r = cache2->upToDateCacheExists("http://foo");
+ ASSERT_FALSE(r);
+ }
+
+ // "Update", same data, check that the id number is reused
+ cache2->createCache("http://foo", "/nix/storedir", wantMassQuery, prio);
+
+ {
+ auto r = cache2->upToDateCacheExists("http://foo");
+ ASSERT_TRUE(r);
+ ASSERT_EQ(r->priority, prio);
+ ASSERT_EQ(r->wantMassQuery, wantMassQuery);
+ ASSERT_EQ(r->id, savedId);
+ }
+
+ {
+ auto q(getIds.use());
+ ASSERT_TRUE(q.next());
+ auto currentId = q.getInt(0);
+ ASSERT_FALSE(q.next());
+ ASSERT_EQ(currentId, savedId);
+ }
+
+ // Check that the fields can be modified, and the id remains the same
+ {
+ auto r0 = cache2->upToDateCacheExists("https://bar");
+ ASSERT_FALSE(r0);
+
+ cache2->createCache("https://bar", "/nix/storedir", !wantMassQuery, prio + 10);
+ auto r = cache2->upToDateCacheExists("https://bar");
+ ASSERT_EQ(r->wantMassQuery, !wantMassQuery);
+ ASSERT_EQ(r->priority, prio + 10);
+ ASSERT_EQ(r->id, barId);
+ }
+
+ // // Force update (no use case yet; we only retrieve cache metadata when stale based on timestamp)
+ // {
+ // cache2->createCache("https://bar", "/nix/storedir", wantMassQuery, prio + 20);
+ // auto r = cache2->upToDateCacheExists("https://bar");
+ // ASSERT_EQ(r->wantMassQuery, wantMassQuery);
+ // ASSERT_EQ(r->priority, prio + 20);
+ // }
+ }
+}
+
+}
diff --git a/src/libutil/args.cc b/src/libutil/args.cc
index 2930913d6..35686a8aa 100644
--- a/src/libutil/args.cc
+++ b/src/libutil/args.cc
@@ -29,7 +29,15 @@ void Args::removeFlag(const std::string & longName)
void Completions::add(std::string completion, std::string description)
{
- assert(description.find('\n') == std::string::npos);
+ description = trim(description);
+ // ellipsize overflowing content on the back of the description
+ auto end_index = description.find_first_of(".\n");
+ if (end_index != std::string::npos) {
+ auto needs_ellipsis = end_index != description.size() - 1;
+ description.resize(end_index);
+ if (needs_ellipsis)
+ description.append(" [...]");
+ }
insert(Completion {
.completion = completion,
.description = description
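
A standalone mirror of the new completion-description trimming, for illustration only (it assumes the input has already been `trim()`ed, as in the patch; the helper name is made up):

```cpp
#include <string>

// Cut a completion description at the first '.' or newline and ellipsize
// only if something was actually dropped (mirrors Completions::add above).
std::string ellipsize(std::string description)
{
    auto end_index = description.find_first_of(".\n");
    if (end_index != std::string::npos) {
        bool needs_ellipsis = end_index != description.size() - 1;
        description.resize(end_index);
        if (needs_ellipsis)
            description.append(" [...]");
    }
    return description;
}

// ellipsize("Operate on the store derivation rather than its outputs. See also ...")
//   -> "Operate on the store derivation rather than its outputs [...]"
// ellipsize("Show version.")
//   -> "Show version"   (trailing '.' dropped; nothing follows, so no ellipsis)
```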
diff --git a/src/libutil/namespaces.cc b/src/libutil/namespaces.cc
index fdd52d92b..f66accb10 100644
--- a/src/libutil/namespaces.cc
+++ b/src/libutil/namespaces.cc
@@ -4,7 +4,7 @@
#include "util.hh"
#include "finally.hh"
-#include <mntent.h>
+#include <sys/mount.h>
namespace nix {
@@ -33,63 +33,60 @@ bool userNamespacesSupported()
return false;
}
- Pid pid = startProcess([&]()
- {
- auto res = unshare(CLONE_NEWUSER);
- _exit(res ? 1 : 0);
- });
+ try {
+ Pid pid = startProcess([&]()
+ {
+ _exit(0);
+ }, {
+ .cloneFlags = CLONE_NEWUSER
+ });
- bool supported = pid.wait() == 0;
+ auto r = pid.wait();
+ assert(!r);
+ } catch (SysError & e) {
+ debug("user namespaces do not work on this system: %s", e.msg());
+ return false;
+ }
- if (!supported)
- debug("user namespaces do not work on this system");
-
- return supported;
+ return true;
}();
return res;
}
-bool mountNamespacesSupported()
+bool mountAndPidNamespacesSupported()
{
static auto res = [&]() -> bool
{
- bool useUserNamespace = userNamespacesSupported();
+ try {
- Pid pid = startProcess([&]()
- {
- auto res = unshare(CLONE_NEWNS | (useUserNamespace ? CLONE_NEWUSER : 0));
- _exit(res ? 1 : 0);
- });
+ Pid pid = startProcess([&]()
+ {
+ /* Make sure we don't remount the parent's /proc. */
+ if (mount(0, "/", 0, MS_PRIVATE | MS_REC, 0) == -1)
+ _exit(1);
- bool supported = pid.wait() == 0;
+ /* Test whether we can remount /proc. The kernel disallows
+ this if /proc is not fully visible, i.e. if there are
+ filesystems mounted on top of files inside /proc. See
+ https://lore.kernel.org/lkml/87tvsrjai0.fsf@xmission.com/T/. */
+ if (mount("none", "/proc", "proc", 0, 0) == -1)
+ _exit(2);
- if (!supported)
- debug("mount namespaces do not work on this system");
+ _exit(0);
+ }, {
+ .cloneFlags = CLONE_NEWNS | CLONE_NEWPID | (userNamespacesSupported() ? CLONE_NEWUSER : 0)
+ });
- return supported;
- }();
- return res;
-}
-
-bool pidNamespacesSupported()
-{
- static auto res = [&]() -> bool
- {
- /* Check whether /proc is fully visible, i.e. there are no
- filesystems mounted on top of files inside /proc. If this
- is not the case, then we cannot mount a new /proc inside
- the sandbox that matches the sandbox's PID namespace.
- See https://lore.kernel.org/lkml/87tvsrjai0.fsf@xmission.com/T/. */
- auto fp = fopen("/proc/mounts", "r");
- if (!fp) return false;
- Finally delFP = [&]() { fclose(fp); };
-
- while (auto ent = getmntent(fp))
- if (hasPrefix(std::string_view(ent->mnt_dir), "/proc/")) {
- debug("PID namespaces do not work because /proc is not fully visible; disabling sandboxing");
+ if (pid.wait()) {
+ debug("PID namespaces do not work on this system: cannot remount /proc");
return false;
}
+ } catch (SysError & e) {
+ debug("mount namespaces do not work on this system: %s", e.msg());
+ return false;
+ }
+
return true;
}();
return res;
diff --git a/src/libutil/namespaces.hh b/src/libutil/namespaces.hh
index 34e54d5ad..e82379b9c 100644
--- a/src/libutil/namespaces.hh
+++ b/src/libutil/namespaces.hh
@@ -6,9 +6,7 @@ namespace nix {
bool userNamespacesSupported();
-bool mountNamespacesSupported();
-
-bool pidNamespacesSupported();
+bool mountAndPidNamespacesSupported();
#endif
diff --git a/src/libutil/util.cc b/src/libutil/util.cc
index 40faa4bf2..885bae69c 100644
--- a/src/libutil/util.cc
+++ b/src/libutil/util.cc
@@ -36,6 +36,7 @@
#ifdef __linux__
#include
#include
+#include
#include
#endif
@@ -608,6 +609,19 @@ Path getDataDir()
return dataDir ? *dataDir : getHome() + "/.local/share";
}
+Path getStateDir()
+{
+ auto stateDir = getEnv("XDG_STATE_HOME");
+ return stateDir ? *stateDir : getHome() + "/.local/state";
+}
+
+Path createNixStateDir()
+{
+ Path dir = getStateDir() + "/nix";
+ createDirs(dir);
+ return dir;
+}
+
std::optional getSelfExe()
{
@@ -1051,9 +1065,17 @@ static pid_t doFork(bool allowVfork, std::function<void()> fun)
}
+static int childEntry(void * arg)
+{
+ auto main = (std::function<void()> *) arg;
+ (*main)();
+ return 1;
+}
+
+
pid_t startProcess(std::function<void()> fun, const ProcessOptions & options)
{
- auto wrapper = [&]() {
+ std::function<void()> wrapper = [&]() {
if (!options.allowVfork)
logger = makeSimpleLogger();
try {
@@ -1073,7 +1095,27 @@ pid_t startProcess(std::function<void()> fun, const ProcessOptions & options)
_exit(1);
};
- pid_t pid = doFork(options.allowVfork, wrapper);
+ pid_t pid = -1;
+
+ if (options.cloneFlags) {
+ #ifdef __linux__
+ // Not supported, since then we don't know when to free the stack.
+ assert(!(options.cloneFlags & CLONE_VM));
+
+ size_t stackSize = 1 * 1024 * 1024;
+ auto stack = (char *) mmap(0, stackSize,
+ PROT_WRITE | PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
+ if (stack == MAP_FAILED) throw SysError("allocating stack");
+
+ Finally freeStack([&]() { munmap(stack, stackSize); });
+
+ pid = clone(childEntry, stack + stackSize, options.cloneFlags | SIGCHLD, &wrapper);
+ #else
+ throw Error("clone flags are only supported on Linux");
+ #endif
+ } else
+ pid = doFork(options.allowVfork, wrapper);
+
if (pid == -1) throw SysError("unable to fork");
return pid;
diff --git a/src/libutil/util.hh b/src/libutil/util.hh
index 266da0ae3..b5625ecef 100644
--- a/src/libutil/util.hh
+++ b/src/libutil/util.hh
@@ -158,6 +158,12 @@ Path getDataDir();
/* Return the path of the current executable. */
std::optional getSelfExe();
+/* Return $XDG_STATE_HOME or $HOME/.local/state. */
+Path getStateDir();
+
+/* Create the Nix state directory and return the path to it. */
+Path createNixStateDir();
+
/* Create a directory and all its parents, if necessary. Returns the
list of created directories, in order of creation. */
Paths createDirs(const Path & path);
@@ -301,6 +307,7 @@ struct ProcessOptions
bool dieWithParent = true;
bool runExitHandlers = false;
bool allowVfork = false;
+ int cloneFlags = 0; // use clone() with the specified flags (Linux only)
};
pid_t startProcess(std::function<void()> fun, const ProcessOptions & options = ProcessOptions());
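
A minimal sketch of the new `cloneFlags` option (the helper name and flag choice are illustrative; `local-derivation-goal.cc` above shows the real sandbox usage):

```cpp
#include <sched.h>   // CLONE_NEWNS, CLONE_NEWPID
#include "util.hh"

using namespace nix;

// Spawn `work` in fresh mount and PID namespaces. On Linux, startProcess()
// now goes through clone() and ORs in SIGCHLD itself; on other platforms,
// setting cloneFlags makes it throw.
pid_t spawnInFreshNamespaces(std::function<void()> work)
{
    ProcessOptions options;
    options.cloneFlags = CLONE_NEWNS | CLONE_NEWPID;
    return startProcess(std::move(work), options);
}
```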
diff --git a/src/nix-channel/nix-channel.cc b/src/nix-channel/nix-channel.cc
index 26b53eacc..65016025b 100755
--- a/src/nix-channel/nix-channel.cc
+++ b/src/nix-channel/nix-channel.cc
@@ -164,8 +164,8 @@ static int main_nix_channel(int argc, char ** argv)
{
// Figure out the name of the `.nix-channels' file to use
auto home = getHome();
- channelsList = home + "/.nix-channels";
- nixDefExpr = home + "/.nix-defexpr";
+ channelsList = settings.useXDGBaseDirectories ? createNixStateDir() + "/channels" : home + "/.nix-channels";
+ nixDefExpr = settings.useXDGBaseDirectories ? createNixStateDir() + "/defexpr" : home + "/.nix-defexpr";
// Figure out the name of the channels profile.
profile = profilesDir() + "/channels";
diff --git a/src/nix-env/nix-env.cc b/src/nix-env/nix-env.cc
index 4290c40cd..cffba9201 100644
--- a/src/nix-env/nix-env.cc
+++ b/src/nix-env/nix-env.cc
@@ -1291,7 +1291,7 @@ static void opSwitchProfile(Globals & globals, Strings opFlags, Strings opArgs)
throw UsageError("exactly one argument expected");
Path profile = absPath(opArgs.front());
- Path profileLink = getHome() + "/.nix-profile";
+ Path profileLink = settings.useXDGBaseDirectories ? createNixStateDir() + "/profile" : getHome() + "/.nix-profile";
switchLink(profileLink, profile);
}
@@ -1391,14 +1391,14 @@ static int main_nix_env(int argc, char * * argv)
Operation op = 0;
RepairFlag repair = NoRepair;
std::string file;
- Path nixExprPath;
Globals globals;
globals.instSource.type = srcUnknown;
- nixExprPath = getHome() + "/.nix-defexpr";
globals.instSource.systemFilter = "*";
+ Path nixExprPath = settings.useXDGBaseDirectories ? createNixStateDir() + "/defexpr" : getHome() + "/.nix-defexpr";
+
if (!pathExists(nixExprPath)) {
try {
createDirs(nixExprPath);
diff --git a/src/nix/app.cc b/src/nix/app.cc
index 08cd0ccd4..5cd65136f 100644
--- a/src/nix/app.cc
+++ b/src/nix/app.cc
@@ -1,4 +1,5 @@
#include "installables.hh"
+#include "installable-derived-path.hh"
#include "store-api.hh"
#include "eval-inline.hh"
#include "eval-cache.hh"
@@ -8,30 +9,6 @@
namespace nix {
-struct InstallableDerivedPath : Installable
-{
- ref<Store> store;
- const DerivedPath derivedPath;
-
- InstallableDerivedPath(ref<Store> store, const DerivedPath & derivedPath)
- : store(store)
- , derivedPath(derivedPath)
- {
- }
-
- std::string what() const override { return derivedPath.to_string(*store); }
-
- DerivedPathsWithInfo toDerivedPaths() override
- {
- return {{derivedPath}};
- }
-
- std::optional<StorePath> getStorePath() override
- {
- return std::nullopt;
- }
-};
-
/**
* Return the rewrites that are needed to resolve a string whose context is
* included in `dependencies`.
@@ -146,7 +123,7 @@ App UnresolvedApp::resolve(ref<Store> evalStore, ref<Store> store)
for (auto & ctxElt : unresolved.context)
installableContext.push_back(
- std::make_shared<InstallableDerivedPath>(store, ctxElt));
+ std::make_shared<InstallableDerivedPath>(store, DerivedPath { ctxElt }));
auto builtContext = Installable::build(evalStore, store, Realise::Outputs, installableContext);
res.program = resolveString(*store, unresolved.program, builtContext);
diff --git a/src/nix/daemon.cc b/src/nix/daemon.cc
index 2ba56ee26..a22bccba1 100644
--- a/src/nix/daemon.cc
+++ b/src/nix/daemon.cc
@@ -34,7 +34,7 @@
using namespace nix;
using namespace nix::daemon;
-struct UserSettings : Config {
+struct AuthorizationSettings : Config {
Setting<Strings> trustedUsers{
this, {"root"}, "trusted-users",
@@ -67,9 +67,9 @@ struct UserSettings : Config {
)"};
};
-UserSettings userSettings;
+AuthorizationSettings authorizationSettings;
-static GlobalConfig::Register rSettings(&userSettings);
+static GlobalConfig::Register rSettings(&authorizationSettings);
#ifndef __linux__
#define SPLICE_F_MOVE 0
@@ -240,8 +240,8 @@ static void daemonLoop()
struct group * gr = peer.gidKnown ? getgrgid(peer.gid) : 0;
std::string group = gr ? gr->gr_name : std::to_string(peer.gid);
- Strings trustedUsers = userSettings.trustedUsers;
- Strings allowedUsers = userSettings.allowedUsers;
+ Strings trustedUsers = authorizationSettings.trustedUsers;
+ Strings allowedUsers = authorizationSettings.allowedUsers;
if (matchUser(user, group, trustedUsers))
trusted = Trusted;
diff --git a/src/nix/diff-closures.cc b/src/nix/diff-closures.cc
index 0621d662c..3489cc132 100644
--- a/src/nix/diff-closures.cc
+++ b/src/nix/diff-closures.cc
@@ -106,7 +106,7 @@ void printClosureDiff(
using namespace nix;
-struct CmdDiffClosures : SourceExprCommand
+struct CmdDiffClosures : SourceExprCommand, MixOperateOnOptions
{
std::string _before, _after;
diff --git a/src/nix/why-depends.cc b/src/nix/why-depends.cc
index 76125e5e4..a3a9dc698 100644
--- a/src/nix/why-depends.cc
+++ b/src/nix/why-depends.cc
@@ -27,7 +27,7 @@ static std::string filterPrintable(const std::string & s)
return res;
}
-struct CmdWhyDepends : SourceExprCommand
+struct CmdWhyDepends : SourceExprCommand, MixOperateOnOptions
{
std::string _package, _dependency;
bool all = false;
diff --git a/tests/build-delete.sh b/tests/build-delete.sh
new file mode 100644
index 000000000..636681f64
--- /dev/null
+++ b/tests/build-delete.sh
@@ -0,0 +1,56 @@
+source common.sh
+
+clearStore
+
+set -o pipefail
+
+# https://github.com/NixOS/nix/issues/6572
+issue_6572_independent_outputs() {
+ nix build -f multiple-outputs.nix --json independent --no-link > $TEST_ROOT/independent.json
+
+ # Make sure that 'nix build' can build a derivation that depends on both outputs of another derivation.
+ p=$(nix build -f multiple-outputs.nix use-independent --no-link --print-out-paths)
+ nix-store --delete "$p" # Clean up for next test
+
+ # Make sure that 'nix build' tracks input-outputs correctly when a single output is already present.
+ nix-store --delete "$(jq -r <$TEST_ROOT/independent.json .[0].outputs.first)"
+ p=$(nix build -f multiple-outputs.nix use-independent --no-link --print-out-paths)
+ cmp $p < $TEST_ROOT/independent.json
+ nix-store --delete "$p" # Clean up for next test
+}
+issue_6572_independent_outputs
+
+# https://github.com/NixOS/nix/issues/6572
+issue_6572_dependent_outputs() {
+ nix build -f multiple-outputs.nix --json a --no-link > $TEST_ROOT/a.json
+
+ # Make sure that 'nix build' can build a derivation that depends on both outputs of another derivation.
+ p=$(nix build -f multiple-outputs.nix use-a --no-link --print-out-paths)
+ nix-store --delete "$p" # Clean up for next test
+
+ # Make sure that 'nix build' tracks input-outputs correctly when a single output is already present.
+ nix-store --delete "$(jq -r <$TEST_ROOT/a.json .[0].outputs.second)"
+ p=$(nix build -f multiple-outputs.nix use-a --no-link --print-out-paths)
+ cmp $p < $TEST_ROOT/a.json
+ nix-store --delete "$p" # Clean up for next test
+}
+issue_6572_dependent_outputs
diff --git a/tests/common.sh.in b/tests/common.sh.in
--- a/tests/common.sh.in
+++ b/tests/common.sh.in
@@ -27,6 +29,8 @@ export NIX_REMOTE=$NIX_REMOTE_
unset NIX_PATH
export TEST_HOME=$TEST_ROOT/test-home
export HOME=$TEST_HOME
+unset XDG_STATE_HOME
+unset XDG_DATA_HOME
unset XDG_CONFIG_HOME
unset XDG_CONFIG_DIRS
unset XDG_CACHE_HOME
@@ -62,8 +66,8 @@ readLink() {
}
clearProfiles() {
- profiles="$HOME"/.local/share/nix/profiles
- rm -rf $profiles
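+ # Profiles used by the tests now live under ~/.local/state/nix/profiles (XDG_STATE_HOME).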
+ profiles="$HOME"/.local/state/nix/profiles
+ rm -rf "$profiles"
}
clearStore() {
diff --git a/tests/fetchGitSubmodules.sh b/tests/fetchGitSubmodules.sh
index 50da4cb97..08ccaa3cd 100644
--- a/tests/fetchGitSubmodules.sh
+++ b/tests/fetchGitSubmodules.sh
@@ -104,3 +104,28 @@ noSubmoduleRepoBaseline=$(nix eval --raw --expr "(builtins.fetchGit { url = file
noSubmoduleRepo=$(nix eval --raw --expr "(builtins.fetchGit { url = file://$subRepo; rev = \"$subRev\"; submodules = true; }).outPath")
[[ $noSubmoduleRepoBaseline == $noSubmoduleRepo ]]
+
+# Test relative submodule URLs.
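+# (Clear the fetcher cache first so the rewritten repository is fetched again rather than served from cache.)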
+rm $TEST_HOME/.cache/nix/fetcher-cache*
+rm -rf $rootRepo/.git $rootRepo/.gitmodules $rootRepo/sub
+initGitRepo $rootRepo
+git -C $rootRepo submodule add ../gitSubmodulesSub sub
+git -C $rootRepo commit -m "Add submodule"
+rev2=$(git -C $rootRepo rev-parse HEAD)
+pathWithRelative=$(nix eval --raw --expr "(builtins.fetchGit { url = file://$rootRepo; rev = \"$rev2\"; submodules = true; }).outPath")
+diff -r -x .gitmodules $pathWithSubmodules $pathWithRelative
+
+# Test clones that have an upstream with relative submodule URLs.
+rm $TEST_HOME/.cache/nix/fetcher-cache*
+cloneRepo=$TEST_ROOT/a/b/gitSubmodulesClone # NB /a/b to make the relative path not work relative to $cloneRepo
+git clone $rootRepo $cloneRepo
+pathIndirect=$(nix eval --raw --expr "(builtins.fetchGit { url = file://$cloneRepo; rev = \"$rev2\"; submodules = true; }).outPath")
+[[ $pathIndirect = $pathWithRelative ]]
+
+# Test that if the clone has the submodule already, we're not fetching
+# it again.
+git -C $cloneRepo submodule update --init
+rm $TEST_HOME/.cache/nix/fetcher-cache*
+rm -rf $subRepo
+pathSubmoduleGone=$(nix eval --raw --expr "(builtins.fetchGit { url = file://$cloneRepo; rev = \"$rev2\"; submodules = true; }).outPath")
+[[ $pathSubmoduleGone = $pathWithRelative ]]
diff --git a/tests/linux-sandbox.sh b/tests/linux-sandbox.sh
index 3f304ac2f..e62039567 100644
--- a/tests/linux-sandbox.sh
+++ b/tests/linux-sandbox.sh
@@ -37,3 +37,6 @@ nix-build check.nix -A nondeterministic --sandbox-paths /nix/store --no-out-link
(! nix-build check.nix -A nondeterministic --sandbox-paths /nix/store --no-out-link --check -K 2> $TEST_ROOT/log)
if grep -q 'error: renaming' $TEST_ROOT/log; then false; fi
grep -q 'may not be deterministic' $TEST_ROOT/log
+
+# Test that sandboxed builds cannot write to /etc easily
+(! nix-build -E 'with import ./config.nix; mkDerivation { name = "etc-write"; buildCommand = "echo > /etc/test"; }' --no-out-link --sandbox-paths /nix/store)
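+# (The build is expected to fail: the sandbox's /etc is not writable by the build user.)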
diff --git a/tests/local.mk b/tests/local.mk
index 9117d1637..1123c6fb8 100644
--- a/tests/local.mk
+++ b/tests/local.mk
@@ -22,6 +22,7 @@ nix_tests = \
binary-cache.sh \
multiple-outputs.sh \
ca/build.sh \
+ ca/new-build-cmd.sh \
nix-build.sh \
gc-concurrent.sh \
repair.sh \
@@ -104,6 +105,8 @@ nix_tests = \
ssh-relay.sh \
plugins.sh \
build.sh \
+ build-delete.sh \
+ output-normalization.sh \
ca/nix-run.sh \
selfref-gc.sh ca/selfref-gc.sh \
db-migration.sh \
diff --git a/tests/nix-channel.sh b/tests/nix-channel.sh
index 54b8f5979..b64283f48 100644
--- a/tests/nix-channel.sh
+++ b/tests/nix-channel.sh
@@ -12,6 +12,19 @@ nix-channel --remove xyzzy
[ -e $TEST_HOME/.nix-channels ]
[ "$(cat $TEST_HOME/.nix-channels)" = '' ]
+# Test the XDG Base Directories support
+
+export NIX_CONFIG="use-xdg-base-directories = true"
+
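+# With use-xdg-base-directories enabled, the channel list lives in
+# $XDG_STATE_HOME/nix/channels (default ~/.local/state/nix/channels)
+# instead of ~/.nix-channels.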
+nix-channel --add http://foo/bar xyzzy
+nix-channel --list | grep -q http://foo/bar
+nix-channel --remove xyzzy
+
+unset NIX_CONFIG
+
+[ -e $TEST_HOME/.local/state/nix/channels ]
+[ "$(cat $TEST_HOME/.local/state/nix/channels)" = '' ]
+
# Create a channel.
rm -rf $TEST_ROOT/foo
mkdir -p $TEST_ROOT/foo
diff --git a/tests/nix-profile.sh b/tests/nix-profile.sh
index 7ba3235fa..266dc9e49 100644
--- a/tests/nix-profile.sh
+++ b/tests/nix-profile.sh
@@ -56,6 +56,14 @@ nix profile history
nix profile history | grep "packages.$system.default: ∅ -> 1.0"
nix profile diff-closures | grep 'env-manifest.nix: ε → ∅'
+# Test XDG Base Directories support
+
+export NIX_CONFIG="use-xdg-base-directories = true"
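+# With use-xdg-base-directories enabled, the user profile is expected under
+# $XDG_STATE_HOME/nix/profile (default ~/.local/state/nix/profile).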
+nix profile remove 1
+nix profile install $flake1Dir
+[[ $($TEST_HOME/.local/state/nix/profile/bin/hello) = "Hello World" ]]
+unset NIX_CONFIG
+
# Test upgrading a package.
printf NixOS > $flake1Dir/who
printf 2.0 > $flake1Dir/version
diff --git a/tests/nixos/authorization.nix b/tests/nixos/authorization.nix
new file mode 100644
index 000000000..7e8744dd9
--- /dev/null
+++ b/tests/nixos/authorization.nix
@@ -0,0 +1,79 @@
+{
+ name = "authorization";
+
+ nodes.machine = {
+ virtualisation.writableStore = true;
+ # TODO add a test without allowed-users setting. allowed-users is uncommon among NixOS users.
+ nix.settings.allowed-users = ["alice" "bob"];
+ nix.settings.trusted-users = ["alice"];
+
+ users.users.alice.isNormalUser = true;
+ users.users.bob.isNormalUser = true;
+ users.users.mallory.isNormalUser = true;
+
+ nix.settings.experimental-features = "nix-command";
+ };
+
+ testScript =
+ let
+ pathFour = "/nix/store/20xfy868aiic0r0flgzq4n5dq1yvmxkn-four";
+ in
+ ''
+ machine.wait_for_unit("multi-user.target")
+ machine.succeed("""
+ exec 1>&2
+ echo kSELDhobKaF8/VdxIxdP7EQe+Q > one
+ diff $(nix store add-file one) one
+ """)
+ machine.succeed("""
+ su --login alice -c '
+ set -x
+ cd ~
+ echo ehHtmfuULXYyBV6NBk6QUi8iE0 > two
+ ls
+ diff $(echo $(nix store add-file two)) two' 1>&2
+ """)
+ machine.succeed("""
+ su --login bob -c '
+ set -x
+ cd ~
+ echo 0Jw8RNp7cK0W2AdNbcquofcOVk > three
+ diff $(nix store add-file three) three
+ ' 1>&2
+ """)
+
+ # We're going to check that a path is not created
+ machine.succeed("""
+ ! [[ -e ${pathFour} ]]
+ """)
+ machine.succeed("""
+ su --login mallory -c '
+ set -x
+ cd ~
+ echo 5mgtDj0ohrWkT50TLR0f4tIIxY > four;
+ (! nix store add-file four 2>&1) | grep -F "cannot open connection to remote store"
+ (! nix store add-file four 2>&1) | grep -F "Connection reset by peer"
+ ! [[ -e ${pathFour} ]]
+ ' 1>&2
+ """)
+
+ # Check that the file _can_ be added, and matches the expected path we were checking
+ machine.succeed("""
+ exec 1>&2
+ echo 5mgtDj0ohrWkT50TLR0f4tIIxY > four
+ four="$(nix store add-file four)"
+ diff $four four
+ diff <(echo $four) <(echo ${pathFour})
+ """)
+
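+ # alice is in trusted-users, so she is allowed to verify and repair the store.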
+ machine.succeed("""
+ su --login alice -c 'nix-store --verify --repair'
+ """)
+
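+ # bob may use the daemon but is not trusted, so repairing paths must be refused.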
+ machine.succeed("""
+ set -x
+ su --login bob -c '(! nix-store --verify --repair 2>&1)' | tee diag 1>&2
+ grep -F "you are not privileged to repair paths" diag
+ """)
+ '';
+}
diff --git a/tests/nixos/containers/systemd-nspawn.nix b/tests/nixos/containers/systemd-nspawn.nix
index 424436b3f..f54f32f2a 100644
--- a/tests/nixos/containers/systemd-nspawn.nix
+++ b/tests/nixos/containers/systemd-nspawn.nix
@@ -56,12 +56,12 @@ runCommand "test"
# Make /run a tmpfs to shut up a systemd warning.
mkdir /run
mount -t tmpfs none /run
- chmod 0700 /run
mount -t cgroup2 none /sys/fs/cgroup
mkdir -p $out
+ chmod +w /etc
touch /etc/os-release
echo a5ea3f98dedc0278b6f3cc8c37eeaeac > /etc/machine-id
diff --git a/tests/nixos/remote-builds.nix b/tests/nixos/remote-builds.nix
index 696cd2652..1c96cc787 100644
--- a/tests/nixos/remote-builds.nix
+++ b/tests/nixos/remote-builds.nix
@@ -11,6 +11,11 @@ let
{ services.openssh.enable = true;
virtualisation.writableStore = true;
nix.settings.sandbox = true;
+
+ # Regression test for use of PID namespaces when /proc has
+ # filesystems mounted on top of it
+ # (i.e. /proc/sys/fs/binfmt_misc).
+ boot.binfmt.emulatedSystems = [ "aarch64-linux" ];
};
# Trivial Nix expression to build remotely.
diff --git a/tests/output-normalization.sh b/tests/output-normalization.sh
new file mode 100644
index 000000000..0f6df5e31
--- /dev/null
+++ b/tests/output-normalization.sh
@@ -0,0 +1,9 @@
+source common.sh
+
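+# Check that store contents are canonicalised: every file in a freshly built
+# output should have its modification time set to 1.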
+testNormalization () {
+ clearStore
+ outPath=$(nix-build ./simple.nix --no-out-link)
+ test "$(stat -c %Y $outPath)" -eq 1
+}
+
+testNormalization