
Merge remote-tracking branch 'upstream/2.3-maintenance' into builder-host-key-stable

Author: John Ericson
Date:   2023-10-26 16:01:18 -04:00
Commit: b4abe56a23
54 changed files with 776 additions and 402 deletions

.github/workflows/test.yml (new file, 76 lines)

@@ -0,0 +1,76 @@
name: "Test"
on:
  pull_request:
  push:
jobs:
  tests:
    needs: [check_cachix]
    strategy:
      matrix:
        os: [ubuntu-latest, macos-latest]
    runs-on: ${{ matrix.os }}
    timeout-minutes: 60
    steps:
    - uses: actions/checkout@v2.4.0
      with:
        fetch-depth: 0
    - uses: cachix/install-nix-action@v16
    - run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV
    - uses: cachix/cachix-action@v10
      if: needs.check_cachix.outputs.secret == 'true'
      with:
        name: '${{ env.CACHIX_NAME }}'
        signingKey: '${{ secrets.CACHIX_SIGNING_KEY }}'
        authToken: '${{ secrets.CACHIX_AUTH_TOKEN }}'
    - run: nix-build release.nix -A build.$(nix-instantiate --eval -E '(builtins.currentSystem)')
  check_cachix:
    name: Cachix secret present for installer tests
    runs-on: ubuntu-latest
    outputs:
      secret: ${{ steps.secret.outputs.secret }}
    steps:
      - name: Check for Cachix secret
        id: secret
        env:
          _CACHIX_SECRETS: ${{ secrets.CACHIX_SIGNING_KEY }}${{ secrets.CACHIX_AUTH_TOKEN }}
        run: echo "::set-output name=secret::${{ env._CACHIX_SECRETS != '' }}"
  installer:
    needs: [tests, check_cachix]
    if: github.event_name == 'push' && needs.check_cachix.outputs.secret == 'true'
    runs-on: ubuntu-latest
    outputs:
      installerURL: ${{ steps.prepare-installer.outputs.installerURL }}
    steps:
    - uses: actions/checkout@v2.4.0
      with:
        fetch-depth: 0
    - run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV
    - uses: cachix/install-nix-action@v16
    - uses: cachix/cachix-action@v10
      with:
        name: '${{ env.CACHIX_NAME }}'
        signingKey: '${{ secrets.CACHIX_SIGNING_KEY }}'
        authToken: '${{ secrets.CACHIX_AUTH_TOKEN }}'
    - id: prepare-installer
      run: scripts/prepare-installer-for-github-actions
  installer_test:
    needs: [installer, check_cachix]
    if: github.event_name == 'push' && needs.check_cachix.outputs.secret == 'true'
    strategy:
      matrix:
        os: [ubuntu-latest, macos-latest]
    runs-on: ${{ matrix.os }}
    steps:
    - uses: actions/checkout@v2.4.0
    - run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV
    - uses: cachix/install-nix-action@v16
      with:
        install_url: '${{needs.installer.outputs.installerURL}}'
        install_options: "--tarball-url-prefix https://${{ env.CACHIX_NAME }}.cachix.org/serve"
    - run: nix-instantiate -E 'builtins.currentTime' --eval

@@ -1 +1 @@
-2.3.10
+2.3.17

@@ -1,3 +1,4 @@
+HOST_OS = @host_os@
 AR = @AR@
 BDW_GC_LIBS = @BDW_GC_LIBS@
 BUILD_SHARED_LIBS = @BUILD_SHARED_LIBS@
@@ -18,6 +19,7 @@ SODIUM_LIBS = @SODIUM_LIBS@
 LIBLZMA_LIBS = @LIBLZMA_LIBS@
 SQLITE3_LIBS = @SQLITE3_LIBS@
 LIBBROTLI_LIBS = @LIBBROTLI_LIBS@
+LIBZSTD_LIBS = @LIBZSTD_LIBS@
 EDITLINE_LIBS = @EDITLINE_LIBS@
 bash = @bash@
 bindir = @bindir@

@@ -33,14 +33,6 @@ AC_ARG_WITH(system, AC_HELP_STRING([--with-system=SYSTEM],
     system="$machine_name-`echo $host_os | "$SED" -e's/@<:@0-9.@:>@*$//g'`";;
 esac])
 
-sys_name=$(uname -s | tr 'A-Z ' 'a-z_')
-
-case $sys_name in
-    cygwin*)
-        sys_name=cygwin
-        ;;
-esac
-
 AC_MSG_RESULT($system)
 AC_SUBST(system)
 AC_DEFINE_UNQUOTED(SYSTEM, ["$system"], [platform identifier ('cpu-os')])
@@ -67,10 +59,12 @@ AC_SYS_LARGEFILE
 
 # Solaris-specific stuff.
 AC_STRUCT_DIRENT_D_TYPE
-if test "$sys_name" = sunos; then
+case "$host_os" in
+  solaris*)
     # Solaris requires -lsocket -lnsl for network functions
-    LIBS="-lsocket -lnsl $LIBS"
-fi
+    LDFLAGS="-lsocket -lnsl $LDFLAGS"
+    ;;
+esac
 
 # Check for pubsetbuf.
@@ -157,6 +151,30 @@ AX_BOOST_BASE([1.66], [CXXFLAGS="$BOOST_CPPFLAGS $CXXFLAGS"], [AC_MSG_ERROR([Nix
 # ends up with LDFLAGS being empty, so we set it afterwards.
 LDFLAGS="$BOOST_LDFLAGS $LDFLAGS"
 
+# On some platforms, new-style atomics need a helper library
+AC_MSG_CHECKING(whether -latomic is needed)
+AC_LINK_IFELSE([AC_LANG_SOURCE([[
+#include <stdint.h>
+uint64_t v;
+int main() {
+    return (int)__atomic_load_n(&v, __ATOMIC_ACQUIRE);
+}]])], GCC_ATOMIC_BUILTINS_NEED_LIBATOMIC=no, GCC_ATOMIC_BUILTINS_NEED_LIBATOMIC=yes)
+AC_MSG_RESULT($GCC_ATOMIC_BUILTINS_NEED_LIBATOMIC)
+if test "x$GCC_ATOMIC_BUILTINS_NEED_LIBATOMIC" = xyes; then
+    LDFLAGS="-latomic $LDFLAGS"
+fi
+
+PKG_PROG_PKG_CONFIG
+
+AC_ARG_ENABLE(shared, AC_HELP_STRING([--enable-shared],
+    [Build shared libraries for Nix [default=yes]]),
+    shared=$enableval, shared=yes)
+if test "$shared" = yes; then
+    AC_SUBST(BUILD_SHARED_LIBS, 1, [Whether to build shared libraries.])
+else
+    AC_SUBST(BUILD_SHARED_LIBS, 0, [Whether to build shared libraries.])
+    PKG_CONFIG="$PKG_CONFIG --static"
+fi
+
 # Look for OpenSSL, a required dependency.
 PKG_CHECK_MODULES([OPENSSL], [libcrypto], [CXXFLAGS="$OPENSSL_CFLAGS $CXXFLAGS"])
@@ -205,24 +223,29 @@ AC_CHECK_LIB([lzma], [lzma_stream_encoder_mt],
 # Look for libbrotli{enc,dec}.
 PKG_CHECK_MODULES([LIBBROTLI], [libbrotlienc libbrotlidec], [CXXFLAGS="$LIBBROTLI_CFLAGS $CXXFLAGS"])
 
+# Look for libzstd.
+PKG_CHECK_MODULES([LIBZSTD], [libzstd], [CXXFLAGS="$LIBZSTD_CFLAGS $CXXFLAGS"])
+
 # Look for libseccomp, required for Linux sandboxing.
-if test "$sys_name" = linux; then
-    AC_ARG_ENABLE([seccomp-sandboxing],
-        AC_HELP_STRING([--disable-seccomp-sandboxing],
-            [Don't build support for seccomp sandboxing (only recommended if your arch doesn't support libseccomp yet!)]
-        ))
-    if test "x$enable_seccomp_sandboxing" != "xno"; then
-        PKG_CHECK_MODULES([LIBSECCOMP], [libseccomp],
-            [CXXFLAGS="$LIBSECCOMP_CFLAGS $CXXFLAGS"])
-        have_seccomp=1
-        AC_DEFINE([HAVE_SECCOMP], [1], [Whether seccomp is available and should be used for sandboxing.])
-    else
-        have_seccomp=
-    fi
-else
-    have_seccomp=
-fi
+case "$host_os" in
+  linux*)
+    AC_ARG_ENABLE([seccomp-sandboxing],
+        AC_HELP_STRING([--disable-seccomp-sandboxing],
+            [Don't build support for seccomp sandboxing (only recommended if your arch doesn't support libseccomp yet!)]
+        ))
+    if test "x$enable_seccomp_sandboxing" != "xno"; then
+        PKG_CHECK_MODULES([LIBSECCOMP], [libseccomp],
+            [CXXFLAGS="$LIBSECCOMP_CFLAGS $CXXFLAGS"])
+        have_seccomp=1
+        AC_DEFINE([HAVE_SECCOMP], [1], [Whether seccomp is available and should be used for sandboxing.])
+    else
+        have_seccomp=
+    fi
+    ;;
+  *)
+    have_seccomp=
+    ;;
+esac
 
 AC_SUBST(HAVE_SECCOMP, [$have_seccomp])
@@ -238,6 +261,7 @@ if test -n "$enable_s3"; then
   declare -a aws_version_tokens=($(printf '#include <aws/core/VersionConfig.h>\nAWS_SDK_VERSION_STRING' | $CPP $CPPFLAGS - | grep -v '^#.*' | sed 's/"//g' | tr '.' ' '))
   AC_DEFINE_UNQUOTED([AWS_VERSION_MAJOR], ${aws_version_tokens@<:@0@:>@}, [Major version of aws-sdk-cpp.])
   AC_DEFINE_UNQUOTED([AWS_VERSION_MINOR], ${aws_version_tokens@<:@1@:>@}, [Minor version of aws-sdk-cpp.])
+  AC_DEFINE_UNQUOTED([AWS_VERSION_PATCH], ${aws_version_tokens@<:@2@:>@}, [Patch version of aws-sdk-cpp.])
 fi
@@ -269,9 +293,11 @@ AC_CHECK_FUNCS([strsignal posix_fallocate sysconf])
 
 # This is needed if bzip2 is a static library, and the Nix libraries
 # are dynamic.
-if test "$(uname)" = "Darwin"; then
+case "${host_os}" in
+  darwin*)
     LDFLAGS="-all_load $LDFLAGS"
-fi
+    ;;
+esac
 
 # Do we have GNU tar?
@@ -290,16 +316,6 @@ AC_ARG_WITH(sandbox-shell, AC_HELP_STRING([--with-sandbox-shell=PATH],
   sandbox_shell=$withval)
 AC_SUBST(sandbox_shell)
 
-AC_ARG_ENABLE(shared, AC_HELP_STRING([--enable-shared],
-    [Build shared libraries for Nix [default=yes]]),
-    shared=$enableval, shared=yes)
-if test "$shared" = yes; then
-    AC_SUBST(BUILD_SHARED_LIBS, 1, [Whether to build shared libraries.])
-else
-    AC_SUBST(BUILD_SHARED_LIBS, 0, [Whether to build shared libraries.])
-fi
-
 # Expand all variables in config.status.
 test "$prefix" = NONE && prefix=$ac_default_prefix
 test "$exec_prefix" = NONE && exec_prefix='${prefix}'

@@ -89,6 +89,7 @@ downloadFile("binaryTarball.i686-linux", "1");
 downloadFile("binaryTarball.x86_64-linux", "1");
 downloadFile("binaryTarball.aarch64-linux", "1");
 downloadFile("binaryTarball.x86_64-darwin", "1");
+downloadFile("binaryTarball.aarch64-darwin", "1");
 downloadFile("installerScript", "1");
 
 exit if $version =~ /pre/;
@@ -121,6 +122,7 @@ write_file("$nixpkgsDir/nixos/modules/installer/tools/nix-fallback-paths.nix",
     "  i686-linux = \"" . getStorePath("build.i686-linux") . "\";\n" .
     "  aarch64-linux = \"" . getStorePath("build.aarch64-linux") . "\";\n" .
     "  x86_64-darwin = \"" . getStorePath("build.x86_64-darwin") . "\";\n" .
+    "  aarch64-darwin = \"" . getStorePath("build.aarch64-darwin") . "\";\n" .
     "}\n");
 
 system("cd $nixpkgsDir && git commit -a -m 'nix: $oldName -> $version'") == 0 or die;

@@ -1,4 +1,4 @@
-ifeq ($(OS), Darwin)
+ifdef HOST_DARWIN
 
   $(eval $(call install-data-in, $(d)/org.nixos.nix-daemon.plist, $(prefix)/Library/LaunchDaemons))

@@ -1,4 +1,4 @@
-ifeq ($(OS), Linux)
+ifdef HOST_LINUX
 
   $(foreach n, nix-daemon.socket nix-daemon.service, $(eval $(call install-file-in, $(d)/$(n), $(prefix)/lib/systemd/system, 0644)))

@@ -1,4 +1,4 @@
-ifeq ($(OS), Linux)
+ifdef HOST_LINUX
 
   $(foreach n, nix-daemon.conf, $(eval $(call install-file-in, $(d)/$(n), $(sysconfdir)/init, 0644)))

@@ -11,8 +11,25 @@ noinst-scripts :=
 man-pages :=
 install-tests :=
 dist-files :=
-OS = $(shell uname -s)
+
+ifdef HOST_OS
+  HOST_KERNEL = $(firstword $(subst -, ,$(HOST_OS)))
+  ifeq ($(HOST_KERNEL), cygwin)
+    HOST_CYGWIN = 1
+  endif
+  ifeq ($(patsubst darwin%,,$(HOST_KERNEL)),)
+    HOST_DARWIN = 1
+  endif
+  ifeq ($(patsubst freebsd%,,$(HOST_KERNEL)),)
+    HOST_FREEBSD = 1
+  endif
+  ifeq ($(HOST_KERNEL), linux)
+    HOST_LINUX = 1
+  endif
+  ifeq ($(patsubst solaris%,,$(HOST_KERNEL)),)
+    HOST_SOLARIS = 1
+  endif
+endif
 
 # Hack to define a literal space.
 space :=
@@ -52,16 +69,16 @@ endif
 BUILD_SHARED_LIBS ?= 1
 
 ifeq ($(BUILD_SHARED_LIBS), 1)
-  ifeq (CYGWIN,$(findstring CYGWIN,$(OS)))
+  ifdef HOST_CYGWIN
     GLOBAL_CFLAGS += -U__STRICT_ANSI__ -D_GNU_SOURCE
     GLOBAL_CXXFLAGS += -U__STRICT_ANSI__ -D_GNU_SOURCE
   else
     GLOBAL_CFLAGS += -fPIC
     GLOBAL_CXXFLAGS += -fPIC
   endif
-  ifneq ($(OS), Darwin)
-   ifneq ($(OS), SunOS)
-    ifneq ($(OS), FreeBSD)
+  ifndef HOST_DARWIN
+   ifndef HOST_SOLARIS
+    ifndef HOST_FREEBSD
     GLOBAL_LDFLAGS += -Wl,--no-copy-dt-needed-entries
     endif
    endif

@@ -1,9 +1,9 @@
 libs-list :=
 
-ifeq ($(OS), Darwin)
+ifdef HOST_DARWIN
   SO_EXT = dylib
 else
-  ifeq (CYGWIN,$(findstring CYGWIN,$(OS)))
+  ifdef HOST_CYGWIN
     SO_EXT = dll
   else
     SO_EXT = so
@@ -59,7 +59,7 @@ define build-library
   $(1)_OBJS := $$(addprefix $(buildprefix), $$(addsuffix .o, $$(basename $$(_srcs))))
   _libs := $$(foreach lib, $$($(1)_LIBS), $$($$(lib)_PATH))
 
-  ifeq (CYGWIN,$(findstring CYGWIN,$(OS)))
+  ifdef HOST_CYGWIN
     $(1)_INSTALL_DIR ?= $$(bindir)
   else
     $(1)_INSTALL_DIR ?= $$(libdir)
@@ -73,18 +73,18 @@ define build-library
   ifeq ($(BUILD_SHARED_LIBS), 1)
 
     ifdef $(1)_ALLOW_UNDEFINED
-      ifeq ($(OS), Darwin)
+      ifdef HOST_DARWIN
         $(1)_LDFLAGS += -undefined suppress -flat_namespace
       endif
     else
-      ifneq ($(OS), Darwin)
-        ifneq (CYGWIN,$(findstring CYGWIN,$(OS)))
+      ifndef HOST_DARWIN
+        ifndef HOST_CYGWIN
           $(1)_LDFLAGS += -Wl,-z,defs
         endif
       endif
     endif
 
-    ifneq ($(OS), Darwin)
+    ifndef HOST_DARWIN
       $(1)_LDFLAGS += -Wl,-soname=$$($(1)_NAME).$(SO_EXT)
     endif
 
@@ -93,7 +93,7 @@ define build-library
   $$($(1)_PATH): $$($(1)_OBJS) $$(_libs) | $$(_d)/
 	$$(trace-ld) $(CXX) -o $$(abspath $$@) -shared $$(LDFLAGS) $$(GLOBAL_LDFLAGS) $$($(1)_OBJS) $$($(1)_LDFLAGS) $$($(1)_LDFLAGS_PROPAGATED) $$(foreach lib, $$($(1)_LIBS), $$($$(lib)_LDFLAGS_USE)) $$($(1)_LDFLAGS_UNINSTALLED)
 
-  ifneq ($(OS), Darwin)
+  ifndef HOST_DARWIN
     $(1)_LDFLAGS_USE += -Wl,-rpath,$$(abspath $$(_d))
   endif
   $(1)_LDFLAGS_USE += -L$$(_d) -l$$(patsubst lib%,%,$$(strip $$($(1)_NAME)))
@@ -108,7 +108,7 @@ define build-library
 	$$(trace-ld) $(CXX) -o $$@ -shared $$(LDFLAGS) $$(GLOBAL_LDFLAGS) $$($(1)_OBJS) $$($(1)_LDFLAGS) $$($(1)_LDFLAGS_PROPAGATED) $$(foreach lib, $$($(1)_LIBS), $$($$(lib)_LDFLAGS_USE_INSTALLED))
 
   $(1)_LDFLAGS_USE_INSTALLED += -L$$(DESTDIR)$$($(1)_INSTALL_DIR) -l$$(patsubst lib%,%,$$(strip $$($(1)_NAME)))
-  ifneq ($(OS), Darwin)
+  ifndef HOST_DARWIN
     ifeq ($(SET_RPATH_TO_LIBS), 1)
       $(1)_LDFLAGS_USE_INSTALLED += -Wl,-rpath,$$($(1)_INSTALL_DIR)
     else

mk/run_test.sh (new executable file, 28 lines)

@@ -0,0 +1,28 @@
#!/bin/sh

set -u

red=""
green=""
yellow=""
normal=""

post_run_msg="ran test $1..."
if [ -t 1 ]; then
  red=""
  green=""
  yellow=""
  normal=""
fi
(cd $(dirname $1) && env ${TESTS_ENVIRONMENT} init.sh 2>/dev/null > /dev/null)
log="$(cd $(dirname $1) && env ${TESTS_ENVIRONMENT} $(basename $1) 2>&1)"
status=$?
if [ $status -eq 0 ]; then
  echo "$post_run_msg [${green}PASS$normal]"
elif [ $status -eq 99 ]; then
  echo "$post_run_msg [${yellow}SKIP$normal]"
else
  echo "$post_run_msg [${red}FAIL$normal]"
  echo "$log" | sed 's/^/    /'
  exit "$status"
fi

@@ -1,45 +1,15 @@
 # Run program $1 as part of make installcheck.
+
+test-deps =
+
 define run-install-test
 
-  installcheck: $1
+  installcheck: $1.test
 
-  _installcheck-list += $1
+  .PHONY: $1.test
+  $1.test: $1 $(test-deps)
+	@env TEST_NAME=$(notdir $(basename $1)) TESTS_ENVIRONMENT="$(tests-environment)" mk/run_test.sh $1 < /dev/null
 
 endef
 
-# Color code from https://unix.stackexchange.com/a/10065
-
-installcheck:
-	@total=0; failed=0; \
-	red=""; \
-	green=""; \
-	yellow=""; \
-	normal=""; \
-	if [ -t 1 ]; then \
-	  red=""; \
-	  green=""; \
-	  yellow=""; \
-	  normal=""; \
-	fi; \
-	for i in $(_installcheck-list); do \
-	  total=$$((total + 1)); \
-	  printf "running test $$i..."; \
-	  log="$$(cd $$(dirname $$i) && $(tests-environment) $$(basename $$i) 2>&1)"; \
-	  status=$$?; \
-	  if [ $$status -eq 0 ]; then \
-	    echo " [$${green}PASS$$normal]"; \
-	  elif [ $$status -eq 99 ]; then \
-	    echo " [$${yellow}SKIP$$normal]"; \
-	  else \
-	    echo " [$${red}FAIL$$normal]"; \
-	    echo "$$log" | sed 's/^/    /'; \
-	    failed=$$((failed + 1)); \
-	  fi; \
-	done; \
-	if [ "$$failed" != 0 ]; then \
-	  echo "$${red}$$failed out of $$total tests failed $$normal"; \
-	  exit 1; \
-	else \
-	  echo "$${green}All tests succeeded$$normal"; \
-	fi
 
 .PHONY: check installcheck

@@ -28,6 +28,7 @@ Requires: curl
 Requires: bzip2
 Requires: gzip
 Requires: xz
+Requires: zstd
 BuildRequires: bison
 BuildRequires: boost-devel >= 1.60
 BuildRequires: bzip2-devel

@@ -1,3 +1,4 @@
+HOST_OS = @host_os@
 CC = @CC@
 CFLAGS = @CFLAGS@
 CXX = @CXX@

@@ -10,6 +10,8 @@ AC_PROG_CC
 AC_PROG_CXX
 AX_CXX_COMPILE_STDCXX_11
 
+AC_CANONICAL_HOST
+
 # Use 64-bit file system calls so that we can support files > 2 GiB.
 AC_SYS_LARGEFILE

@@ -28,7 +28,7 @@ Store_CXXFLAGS = \
 
 Store_LDFLAGS := $(SODIUM_LIBS) $(NIX_LIBS)
 
-ifeq (CYGWIN,$(findstring CYGWIN,$(OS)))
+ifdef HOST_CYGWIN
   archlib = $(shell perl -E 'use Config; print $$Config{archlib};')
   libperl = $(shell perl -E 'use Config; print $$Config{libperl};')
   Store_LDFLAGS += $(shell find ${archlib} -name ${libperl})

@@ -49,7 +49,7 @@ rec {
   buildDeps =
     [ curl
-      bzip2 xz brotli editline
+      bzip2 xz brotli zstd editline
       openssl pkgconfig sqlite
       boost
@@ -57,7 +57,7 @@ rec {
       git
       mercurial
     ]
-    ++ lib.optionals stdenv.isLinux [libseccomp utillinuxMinimal]
+    ++ lib.optionals stdenv.isLinux [libseccomp (pkgs.util-linuxMinimal or pkgs.utillinuxMinimal)]
     ++ lib.optional (stdenv.isLinux || stdenv.isDarwin) libsodium
     ++ lib.optional (stdenv.isLinux || stdenv.isDarwin)
       ((aws-sdk-cpp.override {

@@ -1,7 +1,7 @@
 { nix ? builtins.fetchGit ./.
-, nixpkgs ? builtins.fetchTarball https://github.com/NixOS/nixpkgs-channels/archive/nixos-19.03.tar.gz
+, nixpkgs ? builtins.fetchTarball https://github.com/NixOS/nixpkgs/archive/nixos-21.05-small.tar.gz
 , officialRelease ? false
-, systems ? [ "x86_64-linux" "i686-linux" "x86_64-darwin" "aarch64-linux" ]
+, systems ? [ "x86_64-linux" "i686-linux" "x86_64-darwin" "aarch64-linux" "aarch64-darwin" ]
 }:
 
 let
@@ -108,7 +108,8 @@ let
       buildInputs =
         [ jobs.build.${system} curl bzip2 xz pkgconfig pkgs.perl boost ]
-        ++ lib.optional (stdenv.isLinux || stdenv.isDarwin) libsodium;
+        ++ lib.optional (stdenv.isLinux || stdenv.isDarwin) libsodium
+        ++ lib.optional stdenv.isDarwin darwin.apple_sdk.frameworks.Security;
 
       configureFlags = ''
         --with-dbi=${perlPackages.DBI}/${pkgs.perl.libPrefix}
@@ -223,16 +224,6 @@ let
     };
 
-    #rpm_fedora27x86_64 = makeRPM_x86_64 (diskImageFunsFun: diskImageFunsFun.fedora27x86_64) [ ];
-
-    #deb_debian8i386 = makeDeb_i686 (diskImageFuns: diskImageFuns.debian8i386) [ "libsodium-dev" ] [ "libsodium13" ];
-    #deb_debian8x86_64 = makeDeb_x86_64 (diskImageFunsFun: diskImageFunsFun.debian8x86_64) [ "libsodium-dev" ] [ "libsodium13" ];
-
-    #deb_ubuntu1710i386 = makeDeb_i686 (diskImageFuns: diskImageFuns.ubuntu1710i386) [ ] [ "libsodium18" ];
-    #deb_ubuntu1710x86_64 = makeDeb_x86_64 (diskImageFuns: diskImageFuns.ubuntu1710x86_64) [ ] [ "libsodium18" "libboost-context1.62.0" ];
-
     # System tests.
     tests.remoteBuilds = (import ./tests/remote-builds.nix rec {
       inherit nixpkgs;
@@ -252,37 +243,6 @@ let
       nix = build.${system}; inherit system;
     });
 
-    tests.binaryTarball =
-      with import nixpkgs { system = "x86_64-linux"; };
-      vmTools.runInLinuxImage (runCommand "nix-binary-tarball-test"
-        { diskImage = vmTools.diskImages.ubuntu1204x86_64;
-        }
-        ''
-          set -x
-          useradd -m alice
-          su - alice -c 'tar xf ${binaryTarball.x86_64-linux}/*.tar.*'
-          mkdir /dest-nix
-          mount -o bind /dest-nix /nix # Provide a writable /nix.
-          chown alice /nix
-          su - alice -c '_NIX_INSTALLER_TEST=1 ./nix-*/install'
-          su - alice -c 'nix-store --verify'
-          su - alice -c 'PAGER= nix-store -qR ${build.x86_64-linux}'
-
-          # Check whether 'nix upgrade-nix' works.
-          cat > /tmp/paths.nix <<EOF
-          {
-            x86_64-linux = "${build.x86_64-linux}";
-          }
-          EOF
-          su - alice -c 'nix upgrade-nix -vvv --nix-store-paths-url file:///tmp/paths.nix'
-          (! [ -L /home/alice/.profile-1-link ])
-          su - alice -c 'PAGER= nix-store -qR ${build.x86_64-linux}'
-
-          mkdir -p $out/nix-support
-          touch $out/nix-support/hydra-build-products
-          umount /nix
-        ''); # */
-
     /*
     tests.evalNixpkgs =
       import (nixpkgs + "/pkgs/top-level/make-tarball.nix") {
@@ -315,7 +275,7 @@ let
           substitute ${./scripts/install.in} $out/install \
             ${pkgs.lib.concatMapStrings
               (system: "--replace '@binaryTarball_${system}@' $(nix hash-file --base16 --type sha256 ${binaryTarball.${system}}/*.tar.xz) ")
-              [ "x86_64-linux" "i686-linux" "x86_64-darwin" "aarch64-linux" ]
+              [ "x86_64-linux" "i686-linux" "x86_64-darwin" "aarch64-darwin" "aarch64-linux" ]
             } \
             --replace '@nixVersion@' ${build.x86_64-linux.src.version}
 
@@ -325,55 +285,4 @@ let
     };
 
-  makeRPM_i686 = makeRPM "i686-linux";
-  makeRPM_x86_64 = makeRPM "x86_64-linux";
-
-  makeRPM =
-    system: diskImageFun: extraPackages:
-
-    with import nixpkgs { inherit system; };
-
-    releaseTools.rpmBuild rec {
-      name = "nix-rpm";
-      src = jobs.tarball;
-      diskImage = (diskImageFun vmTools.diskImageFuns)
-        { extraPackages =
-            [ "sqlite" "sqlite-devel" "bzip2-devel" "libcurl-devel" "openssl-devel" "xz-devel" "libseccomp-devel" "libsodium-devel" "boost-devel" "bison" "flex" ]
-          ++ extraPackages; };
-      # At most 2047MB can be simulated in qemu-system-i386
-      memSize = 2047;
-      meta.schedulingPriority = 50;
-      postRPMInstall = "cd /tmp/rpmout/BUILD/nix-* && make installcheck";
-      #enableParallelBuilding = true;
-    };
-
-  makeDeb_i686 = makeDeb "i686-linux";
-  makeDeb_x86_64 = makeDeb "x86_64-linux";
-
-  makeDeb =
-    system: diskImageFun: extraPackages: extraDebPackages:
-
-    with import nixpkgs { inherit system; };
-
-    releaseTools.debBuild {
-      name = "nix-deb";
-      src = jobs.tarball;
-      diskImage = (diskImageFun vmTools.diskImageFuns)
-        { extraPackages =
-            [ "libsqlite3-dev" "libbz2-dev" "libcurl-dev" "libcurl3-nss" "libssl-dev" "liblzma-dev" "libseccomp-dev" "libsodium-dev" "libboost-all-dev" ]
-          ++ extraPackages; };
-      memSize = 2047;
-      meta.schedulingPriority = 50;
-      postInstall = "make installcheck";
-      configureFlags = "--sysconfdir=/etc";
-      debRequires =
-        [ "curl" "libsqlite3-0" "libbz2-1.0" "bzip2" "xz-utils" "libssl1.0.0" "liblzma5" "libseccomp2" ]
-        ++ extraDebPackages;
-      debMaintainer = "Eelco Dolstra <eelco.dolstra@logicblox.com>";
-      doInstallCheck = true;
-      #enableParallelBuilding = true;
-    };
-
 in jobs

@@ -0,0 +1,46 @@
#!/usr/bin/env bash

((NEW_NIX_FIRST_BUILD_UID=301))

id_available(){
	dscl . list /Users UniqueID | grep -E '\b'$1'\b' >/dev/null
}

change_nixbld_names_and_ids(){
	local name uid next_id
	((next_id=NEW_NIX_FIRST_BUILD_UID))
	echo "Attempting to migrate nixbld users."
	echo "Each user should change from nixbld# to _nixbld#"
	echo "and their IDs relocated to $next_id+"
	while read -r name uid; do
		echo "   Checking $name (uid: $uid)"
		# iterate for a clean ID
		while id_available "$next_id"; do
			((next_id++))
			if ((next_id >= 400)); then
				echo "We've hit UID 400 without placing all of your users :("
				echo "You should use the commands in this script as a starting"
				echo "point to review your UID-space and manually move the"
				echo "remaining users (or delete them, if you don't need them)."
				exit 1
			fi
		done

		if [[ $name == _* ]]; then
			echo "    It looks like $name has already been renamed--skipping."
		else
			# first 3 are cleanup, it's OK if they aren't here
			sudo dscl . delete /Users/$name dsAttrTypeNative:_writers_passwd &>/dev/null || true
			sudo dscl . change /Users/$name NFSHomeDirectory "/private/var/empty 1" "/var/empty" &>/dev/null || true
			# remove existing user from group
			sudo dseditgroup -o edit -t user -d $name nixbld || true
			sudo dscl . change /Users/$name UniqueID $uid $next_id
			sudo dscl . change /Users/$name RecordName $name _$name
			# add renamed user to group
			sudo dseditgroup -o edit -t user -a _$name nixbld
			echo "    $name migrated to _$name (uid: $next_id)"
		fi
	done < <(dscl . list /Users UniqueID | grep nixbld | sort -n -k2)
}

change_nixbld_names_and_ids

@@ -4,6 +4,8 @@ set -eu
 set -o pipefail
 
 readonly PLIST_DEST=/Library/LaunchDaemons/org.nixos.nix-daemon.plist
+NIX_FIRST_BUILD_UID="301"
+NIX_BUILD_USER_NAME_TEMPLATE="_nixbld%d"
 
 dsclattr() {
     /usr/bin/dscl . -read "$1" \

@@ -25,7 +25,9 @@ readonly RED='\033[31m'
 readonly NIX_USER_COUNT=${NIX_USER_COUNT:-32}
 readonly NIX_BUILD_GROUP_ID="30000"
 readonly NIX_BUILD_GROUP_NAME="nixbld"
-readonly NIX_FIRST_BUILD_UID="30001"
+# darwin installer needs to override these
+NIX_FIRST_BUILD_UID="30001"
+NIX_BUILD_USER_NAME_TEMPLATE="nixbld%d"
 # Please don't change this. We don't support it, because the
 # default shell profile that comes with Nix doesn't support it.
 readonly NIX_ROOT="/nix"
@@ -61,8 +63,10 @@ contactme() {
     echo "If you can, open an issue at https://github.com/nixos/nix/issues"
     echo ""
     echo "Or feel free to contact the team,"
-    echo " - on IRC #nixos on irc.freenode.net"
+    echo " - on Matrix #nix:nixos.org"
+    echo " - on IRC #nixos on irc.libera.chat"
     echo " - on twitter @nixos_org"
+    echo " - on our forum https://discourse.nixos.org/"
 }
 
 uninstall_directions() {
@@ -102,7 +106,7 @@ EOF
 }
 
 nix_user_for_core() {
-    printf "nixbld%d" "$1"
+    printf "$NIX_BUILD_USER_NAME_TEMPLATE" "$1"
 }
 
 nix_uid_for_core() {

@@ -154,9 +154,15 @@ fi
 
 mkdir -p $dest/store
 
 printf "copying Nix to %s..." "${dest}/store" >&2
+# Insert a newline if no progress is shown.
+if [ ! -t 0 ]; then
+  echo ""
+fi
 
 for i in $(cd "$self/store" >/dev/null && echo ./*); do
-    printf "." >&2
+    if [ -t 0 ]; then
+      printf "." >&2
+    fi
     i_tmp="$dest/store/$i.$$"
     if [ -e "$i_tmp" ]; then
         rm -rf "$i_tmp"

@@ -29,8 +29,7 @@ case "$(uname -s).$(uname -m)" in
     Linux.i?86) system=i686-linux; hash=@binaryTarball_i686-linux@;;
     Linux.aarch64) system=aarch64-linux; hash=@binaryTarball_aarch64-linux@;;
     Darwin.x86_64) system=x86_64-darwin; hash=@binaryTarball_x86_64-darwin@;;
-    # eventually maybe: system=arm64-darwin; hash=@binaryTarball_arm64-darwin@;;
-    Darwin.arm64) system=x86_64-darwin; hash=@binaryTarball_x86_64-darwin@;;
+    Darwin.arm64) system=aarch64-darwin; hash=@binaryTarball_aarch64-darwin@;;
     *) oops "sorry, there is no binary distribution of Nix for your platform";;
 esac

@@ -17,11 +17,21 @@ elif [ -e /etc/pki/tls/certs/ca-bundle.crt ]; then # Fedora, CentOS
     export NIX_SSL_CERT_FILE=/etc/pki/tls/certs/ca-bundle.crt
 else
   # Fall back to what is in the nix profiles, favouring whatever is defined last.
-  for i in $NIX_PROFILES; do
-    if [ -e $i/etc/ssl/certs/ca-bundle.crt ]; then
-      export NIX_SSL_CERT_FILE=$i/etc/ssl/certs/ca-bundle.crt
-    fi
-  done
+  check_nix_profiles() {
+    if [ "$ZSH_VERSION" ]; then
+      # Zsh by default doesn't split words in unquoted parameter expansion.
+      # Set local_options for these options to be reverted at the end of the function
+      # and shwordsplit to force splitting words in $NIX_PROFILES below.
+      setopt local_options shwordsplit
+    fi
+    for i in $NIX_PROFILES; do
+      if [ -e $i/etc/ssl/certs/ca-bundle.crt ]; then
+        export NIX_SSL_CERT_FILE=$i/etc/ssl/certs/ca-bundle.crt
+      fi
+    done
+  }
+  check_nix_profiles
+  unset -f check_nix_profiles
 fi
 
 export NIX_PATH="nixpkgs=@localstatedir@/nix/profiles/per-user/root/channels/nixpkgs:@localstatedir@/nix/profiles/per-user/root/channels"

@@ -9,7 +9,7 @@ libexpr_SOURCES := $(wildcard $(d)/*.cc) $(wildcard $(d)/primops/*.cc) $(d)/lexe
 libexpr_LIBS = libutil libstore
 
 libexpr_LDFLAGS =
-ifneq ($(OS), FreeBSD)
+ifdef HOST_LINUX
  libexpr_LDFLAGS += -ldl
 endif

@@ -105,7 +105,7 @@ void ExprAttrs::show(std::ostream & str) const
     str << "{ ";
     for (auto & i : attrs)
         if (i.second.inherited)
-            str << "inherit " << i.first << " " << "; ";
+            str << "inherit " << i.first << "; ";
         else
             str << i.first << " = " << *i.second.e << "; ";
     for (auto & i : dynamicAttrs)
@@ -211,7 +211,7 @@ string showAttrPath(const AttrPath & attrPath)
         if (i.symbol.set())
             out << i.symbol;
         else
-            out << "\"${" << *i.expr << "}\"";
+            out << "${" << *i.expr << "}";
     }
     return out.str();
 }

@@ -937,10 +937,16 @@ static void prim_hashFile(EvalState & state, const Pos & pos, Value * * args, Va
     if (ht == htUnknown)
         throw Error(format("unknown hash type '%1%', at %2%") % type % pos);
 
-    PathSet context; // discarded
-    Path p = state.coerceToPath(pos, *args[1], context);
+    PathSet context;
+    Path path = state.coerceToPath(pos, *args[1], context);
+    try {
+        state.realiseContext(context);
+    } catch (InvalidPathError & e) {
+        throw EvalError(format("cannot read '%1%', since path '%2%' is not valid, at %3%")
+            % path % e.path % pos);
+    }
 
-    mkString(v, hashFile(ht, state.checkSourcePath(p)).to_string(Base16, false), context);
+    mkString(v, hashFile(ht, state.checkSourcePath(state.toRealPath(path, context))).to_string(Base16, false));
 }
 
 /* Read a directory (without . or ..) */
@@ -1350,6 +1356,10 @@ static void prim_catAttrs(EvalState & state, const Pos & pos, Value * * args, Va
 static void prim_functionArgs(EvalState & state, const Pos & pos, Value * * args, Value & v)
 {
     state.forceValue(*args[0]);
+    if (args[0]->type == tPrimOpApp || args[0]->type == tPrimOp) {
+        state.mkAttrs(v, 0);
+        return;
+    }
     if (args[0]->type != tLambda)
         throw TypeError(format("'functionArgs' requires a function, at %1%") % pos);
 
@@ -1817,7 +1827,7 @@ static void prim_hashString(EvalState & state, const Pos & pos, Value * * args,
     PathSet context; // discarded
     string s = state.forceString(*args[1], context, pos);
 
-    mkString(v, hashString(ht, s).to_string(Base16, false), context);
+    mkString(v, hashString(ht, s).to_string(Base16, false));
 }
 
@@ -2075,7 +2085,7 @@ void fetch(EvalState & state, const Pos & pos, Value * * args, Value & v,
         else if (n == "name")
             request.name = state.forceStringNoCtx(*attr.value, *attr.pos);
         else
-            throw EvalError(format("unsupported argument '%1%' to '%2%', at %3%") % attr.name % who % attr.pos);
+            throw EvalError(format("unsupported argument '%1%' to '%2%', at %3%") % attr.name % who % *attr.pos);
     }
 
     if (request.uri.empty())

@@ -6,6 +6,7 @@
 #include "hash.hh"
 
 #include <sys/time.h>
+#include <sys/wait.h>
 
 #include <regex>
 
@@ -173,7 +174,7 @@ GitInfo exportGit(ref<Store> store, const std::string & uri,
             Path tmpDir = createTempDir();
             AutoDelete delTmpDir(tmpDir, true);
 
-            runProgram("tar", true, { "x", "-C", tmpDir }, tar);
+            runProgram("tar", true, { "-x", "-f", "-", "-C", tmpDir }, tar);
 
             gitInfo.storePath = store->addToStore(name, tmpDir);

@ -14,6 +14,14 @@
#include <sys/stat.h> #include <sys/stat.h>
#include <unistd.h> #include <unistd.h>
#include <signal.h> #include <signal.h>
#ifdef __linux__
#include <features.h>
#endif
#ifdef __GLIBC__
#include <gnu/lib-names.h>
#include <nss.h>
#include <dlfcn.h>
#endif
#include <openssl/crypto.h> #include <openssl/crypto.h>
@ -95,6 +103,40 @@ static void opensslLockCallback(int mode, int type, const char * file, int line)
} }
#endif #endif
static std::once_flag dns_resolve_flag;
static void preloadNSS() {
/* builtin:fetchurl can trigger a DNS lookup, which with glibc can trigger a dynamic library load of
one of the glibc NSS libraries in a sandboxed child, which will fail unless the library's already
been loaded in the parent. So we force a lookup of an invalid domain to force the NSS machinery to
load its lookup libraries in the parent before any child gets a chance to. */
std::call_once(dns_resolve_flag, []() {
#ifdef __GLIBC__
/* On linux, glibc will run every lookup through the nss layer.
* That means every lookup goes, by default, through nscd, which acts as a local
* cache.
* Because we run builds in a sandbox, we also remove access to nscd otherwise
* lookups would leak into the sandbox.
*
* But now we have a new problem, we need to make sure the nss_dns backend that
* does the dns lookups when nscd is not available is loaded or available.
*
* We can't make it available without leaking nix's environment, so instead we'll
* load the backend, and configure nss so it does not try to run dns lookups
* through nscd.
*
* This is technically only used for builtins:fetch* functions so we only care
* about dns.
*
* All other platforms are unaffected.
*/
if (dlopen (LIBNSS_DNS_SO, RTLD_NOW) == NULL) {
printMsg(Verbosity::lvlWarn, fmt("Unable to load nss_dns backend"));
}
__nss_configure_lookup ("hosts", "dns");
#endif
});
}
static void sigHandler(int signo) { } static void sigHandler(int signo) { }
@ -158,6 +200,8 @@ void initNix()
if (hasPrefix(getEnv("TMPDIR"), "/var/folders/")) if (hasPrefix(getEnv("TMPDIR"), "/var/folders/"))
unsetenv("TMPDIR"); unsetenv("TMPDIR");
#endif #endif
preloadNSS();
} }

@@ -34,7 +34,6 @@
 #include <sys/resource.h>
 #include <sys/socket.h>
 #include <fcntl.h>
-#include <netdb.h>
 #include <unistd.h>
 #include <errno.h>
 #include <cstring>
@@ -45,7 +44,6 @@
 
 /* Includes required for chroot support. */
 #if __linux__
-#include <sys/socket.h>
 #include <sys/ioctl.h>
 #include <net/if.h>
 #include <netinet/ip.h>
@@ -181,6 +179,8 @@ public:
 
     virtual string key() = 0;
 
+    virtual void cleanup() { }
+
 protected:
 
     virtual void amDone(ExitCode result);
@@ -426,6 +426,8 @@ void Goal::amDone(ExitCode result)
     }
     waiters.clear();
     worker.removeGoal(shared_from_this());
+
+    cleanup();
 }
 
@@ -1222,8 +1224,13 @@ void DerivationGoal::outputsSubstituted()
     /* If the substitutes form an incomplete closure, then we should
        build the dependencies of this derivation, but after that, we
-       can still use the substitutes for this derivation itself. */
-    if (nrIncompleteClosure > 0) retrySubstitution = true;
+       can still use the substitutes for this derivation itself.
+
+       If the nrIncompleteClosure != nrFailed, we have another issue as well.
+       In particular, it may be the case that the hole in the closure is
+       an output of the current derivation, which causes a loop if retried.
+     */
+    if (nrIncompleteClosure > 0 && nrIncompleteClosure == nrFailed) retrySubstitution = true;
 
     nrFailed = nrNoSubstituters = nrIncompleteClosure = 0;
 
@@ -1881,22 +1888,6 @@ PathSet DerivationGoal::exportReferences(PathSet storePaths)
     return paths;
 }
 
-static std::once_flag dns_resolve_flag;
-
-static void preloadNSS() {
-    /* builtin:fetchurl can trigger a DNS lookup, which with glibc can trigger a dynamic library load of
-       one of the glibc NSS libraries in a sandboxed child, which will fail unless the library's already
-       been loaded in the parent. So we force a lookup of an invalid domain to force the NSS machinery to
-       load its lookup libraries in the parent before any child gets a chance to. */
-    std::call_once(dns_resolve_flag, []() {
-        struct addrinfo *res = NULL;
-
-        if (getaddrinfo("this.pre-initializes.the.dns.resolvers.invalid.", "http", NULL, &res) != 0) {
-            if (res) freeaddrinfo(res);
-        }
-    });
-}
-
 void DerivationGoal::startBuilder()
 {
     /* Right platform? */
@@ -1908,9 +1899,6 @@ void DerivationGoal::startBuilder()
             settings.thisSystem,
             concatStringsSep(", ", settings.systemFeatures));
 
-    if (drv->isBuiltin())
-        preloadNSS();
-
 #if __APPLE__
     additionalSandboxProfile = parsedDrv->getStringAttr("__sandboxProfile").value_or("");
 #endif
@@ -2060,7 +2048,9 @@ void DerivationGoal::startBuilder()
             if (!found)
                 throw Error(format("derivation '%1%' requested impure path '%2%', but it was not in allowed-impure-host-deps") % drvPath % i);
 
-            dirsInChroot[i] = i;
+            /* Allow files in __impureHostDeps to be missing; e.g.
+               macOS 11+ has no /usr/lib/libSystem*.dylib */
+            dirsInChroot[i] = {i, true};
         }
 
 #if __linux__
@@ -2834,8 +2824,6 @@ void DerivationGoal::runChild()
                 ss.push_back("/etc/services");
                 ss.push_back("/etc/hosts");
-                if (pathExists("/var/run/nscd/socket"))
-                    ss.push_back("/var/run/nscd/socket");
             }
 
             for (auto & i : ss) dirsInChroot.emplace(i, i);
@@ -3911,6 +3899,8 @@ public:
     void handleChildOutput(int fd, const string & data) override;
     void handleEOF(int fd) override;
 
+    void cleanup() override;
+
     Path getStorePath() { return storePath; }
 
     void amDone(ExitCode result) override
@@ -3934,15 +3924,7 @@ SubstitutionGoal::SubstitutionGoal(const Path & storePath, Worker & worker, Repa
 
 SubstitutionGoal::~SubstitutionGoal()
 {
-    try {
-        if (thr.joinable()) {
-            // FIXME: signal worker thread to quit.
-            thr.join();
-            worker.childTerminated(this);
-        }
-    } catch (...) {
-        ignoreException();
-    }
+    cleanup();
 }
 
@@ -3977,6 +3959,8 @@ void SubstitutionGoal::tryNext()
 {
     trace("trying next substituter");
 
+    cleanup();
+
     if (subs.size() == 0) {
         /* None left. Terminate this goal and let someone else deal
            with it. */
@@ -4104,7 +4088,7 @@ void SubstitutionGoal::tryToRun()
     thr = std::thread([this]() {
         try {
            /* Wake up the worker loop when we're done. */
-           Finally updateStats([this]() { outPipe.writeSide = -1; });
+           Finally updateStats([this]() { outPipe.writeSide.close(); });
 
            Activity act(*logger, actSubstitute, Logger::Fields{storePath, sub->getUri()});
            PushActivity pact(act.id);
@@ -4188,6 +4172,20 @@ void SubstitutionGoal::handleEOF(int fd)
     if (fd == outPipe.readSide.get()) worker.wakeUp(shared_from_this());
 }
 
+void SubstitutionGoal::cleanup()
+{
+    try {
+        if (thr.joinable()) {
+            // FIXME: signal worker thread to quit.
+            thr.join();
+            worker.childTerminated(this);
+        }
+
+        outPipe.close();
+    } catch (...) {
+        ignoreException();
+    }
+}
+
 //////////////////////////////////////////////////////////////////////

@@ -349,6 +349,13 @@ struct CurlDownloader : public Downloader
                 (httpStatus == 200 || httpStatus == 201 || httpStatus == 204 || httpStatus == 206 || httpStatus == 304 || httpStatus == 226 /* FTP */ || httpStatus == 0 /* other protocol */))
             {
                 result.cached = httpStatus == 304;
+
+                // In 2021, GitHub responds to If-None-Match with 304,
+                // but omits ETag. We just use the If-None-Match etag
+                // since 304 implies they are the same.
+                if (httpStatus == 304 && result.etag == "")
+                    result.etag = request.expectedETag;
+
                 act.progress(result.bodySize, result.bodySize);
                 done = true;
                 callback(std::move(result));

@@ -34,7 +34,7 @@ Settings::Settings()
     , nixLibexecDir(canonPath(getEnv("NIX_LIBEXEC_DIR", NIX_LIBEXEC_DIR)))
     , nixBinDir(canonPath(getEnv("NIX_BIN_DIR", NIX_BIN_DIR)))
     , nixManDir(canonPath(NIX_MAN_DIR))
-    , nixDaemonSocketFile(canonPath(nixStateDir + DEFAULT_SOCKET_PATH))
+    , nixDaemonSocketFile(canonPath(getEnv("NIX_DAEMON_SOCKET_PATH", nixStateDir + DEFAULT_SOCKET_PATH)))
 {
     buildUsersGroup = getuid() == 0 ? "nixbld" : "";
     lockCPU = getEnv("NIX_AFFINITY_HACK", "1") == "1";

@@ -1029,6 +1029,40 @@ void LocalStore::addToStore(const ValidPathInfo & info, Source & source,
             throw Error("size mismatch importing path '%s';\n  wanted: %s\n  got:   %s",
                 info.path, info.narSize, hashResult.second);
 
+        if (!info.ca.empty()) {
+            auto ca = info.ca;
+            if (hasPrefix(ca, "fixed:")) {
+                bool recursive = ca.compare(6, 2, "r:") == 0;
+                Hash expectedHash(std::string(ca, recursive ? 8 : 6));
+                if (info.references.empty()) {
+                    auto actualFoHash = hashCAPath(
+                        recursive,
+                        expectedHash.type,
+                        realPath
+                    );
+                    if (ca != actualFoHash) {
+                        throw Error("ca hash mismatch importing path '%s';\n  specified: %s\n  got:       %s",
+                            info.path,
+                            ca,
+                            actualFoHash);
+                    }
+                } else {
+                    throw Error("path '%s' claims to be content-addressed, but has references. This isnt allowed",
+                        info.path);
+                }
+            } else if (hasPrefix(ca, "text:")) {
+                Hash textHash(std::string(ca, 5));
+                auto actualTextHash = hashString(htSHA256, readFile(realPath));
+                if (textHash != actualTextHash) {
+                    throw Error("ca hash mismatch importing path '%s';\n  specified: %s\n  got:       %s",
+                        info.path,
+                        textHash.to_string(Base32, true),
+                        actualTextHash.to_string(Base32, true));
+                }
+            }
+        }
+
         autoGC();
 
         canonicalisePathMetaData(realPath, -1);
@@ -1450,4 +1484,20 @@ void LocalStore::createUser(const std::string & userName, uid_t userId)
 }
 
+std::string LocalStore::hashCAPath(
+    bool recursive,
+    const HashType & hashType,
+    const Path & path
+)
+{
+    HashSink caSink(hashType);
+    if (recursive) {
+        dumpPath(path, caSink);
+    } else {
+        readFile(path, caSink);
+    }
+    auto hash = caSink.finish().first;
+    return makeFixedOutputCA(recursive, hash);
+}
+
 }
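For reference, the "ca" strings that this new check compares are the same form that hashCAPath() recomputes. The following is a hypothetical sketch, not part of the commit; it only assumes libstore's existing HashSink, dumpPath and makeFixedOutputCA helpers, and shows how such a string is produced for a recursively (NAR-)hashed path:

// Hypothetical illustration of the expected "fixed:r:sha256:..." shape.
#include "archive.hh"
#include "hash.hh"
#include "store-api.hh"

std::string recomputeFixedOutputCA(const nix::Path & realPath)
{
    nix::HashSink caSink(nix::htSHA256);
    nix::dumpPath(realPath, caSink);            // NAR-serialise the path and hash it ("r:" form)
    auto hash = caSink.finish().first;
    return nix::makeFixedOutputCA(true, hash);  // e.g. "fixed:r:sha256:<base32 digest>"
}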

@@ -295,8 +295,14 @@ private:
 
     void createUser(const std::string & userName, uid_t userId) override;
 
-    friend class DerivationGoal;
-    friend class SubstitutionGoal;
+    std::string hashCAPath(
+        bool recursive,
+        const HashType & hashType,
+        const Path & path
+    );
+
+    friend struct DerivationGoal;
+    friend struct SubstitutionGoal;
 };

@@ -9,7 +9,7 @@ libstore_SOURCES := $(wildcard $(d)/*.cc $(d)/builtins/*.cc)
 libstore_LIBS = libutil
 
 libstore_LDFLAGS = $(SQLITE3_LIBS) -lbz2 $(LIBCURL_LIBS) $(SODIUM_LIBS) -pthread
-ifneq ($(OS), FreeBSD)
+ifdef HOST_LINUX
  libstore_LDFLAGS += -ldl
 endif
 
@@ -21,7 +21,7 @@ ifeq ($(ENABLE_S3), 1)
 	libstore_LDFLAGS += -laws-cpp-sdk-transfer -laws-cpp-sdk-s3 -laws-cpp-sdk-core
 endif
 
-ifeq ($(OS), SunOS)
+ifdef HOST_SOLARIS
 	libstore_LDFLAGS += -lsocket
 endif

@@ -56,6 +56,10 @@ class AwsLogger : public Aws::Utils::Logging::FormattedLogSystem
     {
         debug("AWS: %s", chomp(statement));
     }
+
+#if !(AWS_VERSION_MAJOR <= 1 && AWS_VERSION_MINOR <= 7 && AWS_VERSION_PATCH <= 115)
+    void Flush() override {}
+#endif
 };
 
 static void initAWS()

@@ -32,7 +32,9 @@
   (literal "/tmp") (subpath TMPDIR))
 
 ; Some packages like to read the system version.
-(allow file-read* (literal "/System/Library/CoreServices/SystemVersion.plist"))
+(allow file-read*
+       (literal "/System/Library/CoreServices/SystemVersion.plist")
+       (literal "/System/Library/CoreServices/SystemVersionCompat.plist"))
 
 ; Without this line clang cannot write to /dev/null, breaking some configure tests.
 (allow file-read-metadata (literal "/dev"))

@@ -1,4 +1,5 @@
 #include "sqlite.hh"
+#include "globals.hh"
 #include "util.hh"
 
 #include <sqlite3.h>
@@ -27,8 +28,12 @@ namespace nix {
 
 SQLite::SQLite(const Path & path)
 {
+    // useSQLiteWAL also indicates what virtual file system we need. Using
+    // `unix-dotfile` is needed on NFS file systems and on Windows' Subsystem
+    // for Linux (WSL) where useSQLiteWAL should be false by default.
+    const char *vfs = settings.useSQLiteWAL ? 0 : "unix-dotfile";
     if (sqlite3_open_v2(path.c_str(), &db,
-            SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE, 0) != SQLITE_OK)
+            SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE, vfs) != SQLITE_OK)
         throw Error(format("cannot open SQLite database '%s'") % path);
 }

@@ -5,8 +5,10 @@
 
 #include <lzma.h>
 #include <bzlib.h>
+#include <zstd.h>
 #include <cstdio>
 #include <cstring>
+#include <vector>
 
 #include <brotli/decode.h>
 #include <brotli/encode.h>
@@ -198,6 +200,78 @@ struct BrotliDecompressionSink : ChunkedCompressionSink
     }
 };
 
+struct ZstdDecompressionSink : CompressionSink
+{
+    Sink & nextSink;
+    ZSTD_DStream *strm;
+
+    std::vector<uint8_t> inbuf;
+    size_t outbuf_size = ZSTD_DStreamOutSize();
+    uint8_t *outbuf = new uint8_t[outbuf_size];
+
+    ZstdDecompressionSink(Sink & nextSink) : nextSink(nextSink)
+    {
+        strm = ZSTD_createDStream();
+        if (!strm)
+            throw CompressionError("unable to initialise zstd decoder");
+
+        ZSTD_initDStream(strm);
+    }
+
+    ~ZstdDecompressionSink()
+    {
+        delete[] outbuf;
+        ZSTD_freeDStream(strm);
+    }
+
+    void finish() override
+    {
+        // this call doesn't make any sense, but it's here for consistency with the other compression sinks
+        // CompressionSink inherits from BufferedSink, but none of the subclasses appear to ever make use of the buffer
+        flush();
+
+        // if we still have undecoded data in the input buffer, we can't signal EOF to libzstd
+        // if we don't, then we're done here anyway
+        if (inbuf.size())
+            throw CompressionError("received unexpected EOF while decompressing zstd file");
+
+        nextSink(nullptr, 0);
+    }
+
+    void write(const unsigned char * data, size_t len) override
+    {
+        inbuf.insert(inbuf.end(), data, data + len);
+
+        ZSTD_inBuffer in = {
+            .src = inbuf.data(),
+            .size = inbuf.size(),
+            .pos = 0
+        };
+
+        ZSTD_outBuffer out = {
+            .dst = outbuf,
+            .size = outbuf_size,
+            .pos = 0
+        };
+
+        while (in.pos < in.size) {
+            out.pos = 0;
+
+            size_t ret = ZSTD_decompressStream(strm, &out, &in);
+            if (ZSTD_isError(ret))
+                throw CompressionError("error %s while decompressing zstd file", ZSTD_getErrorName(ret));
+
+            if (out.pos)
+                nextSink(outbuf, out.pos);
+            else
+                break;
+        }
+
+        // drop consumed input
+        inbuf.erase(inbuf.begin(), inbuf.begin() + in.pos);
+    }
+};
+
 ref<std::string> decompress(const std::string & method, const std::string & in)
 {
     StringSink ssink;
@@ -217,6 +291,8 @@ ref<CompressionSink> makeDecompressionSink(const std::string & method, Sink & ne
         return make_ref<BzipDecompressionSink>(nextSink);
     else if (method == "br")
         return make_ref<BrotliDecompressionSink>(nextSink);
+    else if (method == "zstd")
+        return make_ref<ZstdDecompressionSink>(nextSink);
+    else
        throw UnknownCompressionMethod("unknown compression method '%s'", method);
 }

@@ -1,6 +1,7 @@
 #include "json.hh"
 
 #include <iomanip>
+#include <cstdint>
 #include <cstring>
 
 namespace nix {

@@ -6,4 +6,4 @@ libutil_DIR := $(d)
 libutil_SOURCES := $(wildcard $(d)/*.cc)
 
-libutil_LDFLAGS = $(LIBLZMA_LIBS) -lbz2 -pthread $(OPENSSL_LIBS) $(LIBBROTLI_LIBS) $(BOOST_LDFLAGS) -lboost_context
+libutil_LDFLAGS = $(LIBLZMA_LIBS) -lbz2 -pthread $(OPENSSL_LIBS) $(LIBBROTLI_LIBS) $(LIBZSTD_LIBS) $(BOOST_LDFLAGS) -lboost_context

@@ -753,6 +753,7 @@ void AutoCloseFD::close()
         if (::close(fd) == -1)
             /* This should never happen. */
             throw SysError(format("closing file descriptor %1%") % fd);
+        fd = -1;
     }
 }
 
@@ -770,6 +771,12 @@ int AutoCloseFD::release()
     return oldFD;
 }
 
+void Pipe::close()
+{
+    readSide.close();
+    writeSide.close();
+}
+
 void Pipe::create()
 {
@@ -1080,7 +1087,7 @@ void runProgram2(const RunOptions & options)
             throw SysError("executing '%1%'", options.program);
     }, processOptions);
 
-    out.writeSide = -1;
+    out.writeSide.close();
 
     std::thread writerThread;
 
@@ -1093,7 +1100,7 @@
 
     if (source) {
-        in.readSide = -1;
+        in.readSide.close();
         writerThread = std::thread([&]() {
            try {
                std::vector<unsigned char> buf(8 * 1024);
@@ -1110,7 +1117,7 @@
            } catch (...) {
                promise.set_exception(std::current_exception());
            }
-           in.writeSide = -1;
+           in.writeSide.close();
        });
    }

@@ -190,7 +190,6 @@ public:
 class AutoCloseFD
 {
     int fd;
-    void close();
 public:
     AutoCloseFD();
     AutoCloseFD(int fd);
@@ -202,6 +201,7 @@ public:
     int get() const;
     explicit operator bool() const;
     int release();
+    void close();
 };
 
@@ -210,6 +210,7 @@
 public:
     AutoCloseFD readSide, writeSide;
     void create();
+    void close();
 };

@@ -106,7 +106,7 @@ static void _main(int argc, char * * argv)
     // Heuristic to see if we're invoked as a shebang script, namely,
     // if we have at least one argument, it's the name of an
     // executable file, and it starts with "#!".
-    if (runEnv && argc > 1 && !std::regex_search(argv[1], std::regex("nix-shell"))) {
+    if (runEnv && argc > 1) {
         script = argv[1];
         try {
             auto lines = tokenizeString<Strings>(readFile(script), "\n");
@@ -425,6 +425,7 @@ static void _main(int argc, char * * argv)
                 "unset NIX_ENFORCE_PURITY; "
                 "shopt -u nullglob; "
                 "unset TZ; %6%"
+                "shopt -s execfail;"
                 "%7%",
                 (Path) tmpDir,
                 (pure ? "" : "p=$PATH; "),

@ -901,7 +901,11 @@ static PeerInfo getPeerInfo(int remote)
#if defined(SO_PEERCRED)
+#if defined(__OpenBSD__)
+   struct sockpeercred cred;
+#else
    ucred cred;
+#endif
    socklen_t credLen = sizeof(cred);
    if (getsockopt(remote, SOL_SOCKET, SO_PEERCRED, &cred, &credLen) == -1)
        throw SysError("getting peer credentials");
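This hunk only swaps the credential structure on OpenBSD, where SO_PEERCRED fills a struct sockpeercred rather than Linux's struct ucred; the fields the daemon reads (pid, uid, gid) exist in both. A stand-alone sketch of the same portability guard, assuming a connected AF_UNIX socket and an illustrative helper name (getPeer), not the daemon's actual getPeerInfo:

#ifndef _GNU_SOURCE
#define _GNU_SOURCE            // struct ucred may require this on glibc
#endif
#include <sys/socket.h>
#include <sys/types.h>
#include <stdexcept>

// Illustrative helper: read the peer's pid/uid/gid from a connected
// AF_UNIX socket via SO_PEERCRED.
struct Peer { pid_t pid; uid_t uid; gid_t gid; };

Peer getPeer(int fd)
{
#if defined(SO_PEERCRED)
#if defined(__OpenBSD__)
    struct sockpeercred cred;  // OpenBSD's name for the same information
#else
    struct ucred cred;         // Linux layout
#endif
    socklen_t credLen = sizeof(cred);
    if (getsockopt(fd, SOL_SOCKET, SO_PEERCRED, &cred, &credLen) == -1)
        throw std::runtime_error("getting peer credentials failed");
    return Peer{cred.pid, cred.uid, cred.gid};
#else
    throw std::runtime_error("SO_PEERCRED not available on this platform");
#endif
}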

View file

@ -50,10 +50,12 @@ bool createUserEnv(EvalState & state, DrvInfos & elems,
           output paths, and optionally the derivation path, as well
           as the meta attributes. */
        Path drvPath = keepDerivations ? i.queryDrvPath() : "";
+       DrvInfo::Outputs outputs = i.queryOutputs(true);
+       StringSet metaNames = i.queryMetaNames();

        Value & v(*state.allocValue());
        manifest.listElems()[n++] = &v;
-       state.mkAttrs(v, 16);
+       state.mkAttrs(v, 7 + outputs.size());
        mkString(*state.allocAttr(v, state.sType), "derivation");
        mkString(*state.allocAttr(v, state.sName), i.queryName());
@ -65,7 +67,6 @@ bool createUserEnv(EvalState & state, DrvInfos & elems,
            mkString(*state.allocAttr(v, state.sDrvPath), i.queryDrvPath());

        // Copy each output meant for installation.
-       DrvInfo::Outputs outputs = i.queryOutputs(true);
        Value & vOutputs = *state.allocAttr(v, state.sOutputs);
        state.mkList(vOutputs, outputs.size());
        unsigned int m = 0;
@ -85,8 +86,7 @@ bool createUserEnv(EvalState & state, DrvInfos & elems,
        // Copy the meta attributes.
        Value & vMeta = *state.allocAttr(v, state.sMeta);
-       state.mkAttrs(vMeta, 16);
-       StringSet metaNames = i.queryMetaNames();
+       state.mkAttrs(vMeta, metaNames.size());
        for (auto & j : metaNames) {
            Value * v = i.queryMeta(j);
            if (!v) continue;
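These hunks hoist queryOutputs() and queryMetaNames() above the allocation so the attribute sets can be sized exactly (7 + outputs.size() and metaNames.size()) instead of the previous guess of 16. A generic sketch of that sizing pattern with a plain standard container, assuming a hypothetical name (buildManifestAttrs) rather than the evaluator's mkAttrs API:

#include <string>
#include <utility>
#include <vector>

// Generic illustration: collect the dynamic parts first, then allocate
// exactly the capacity that will be used instead of guessing a fixed size.
std::vector<std::pair<std::string, std::string>>
buildManifestAttrs(const std::vector<std::string> & outputs)
{
    std::vector<std::pair<std::string, std::string>> attrs;
    attrs.reserve(7 + outputs.size());   // 7 fixed attributes + one per output

    attrs.emplace_back("type", "derivation");
    attrs.emplace_back("name", "example");
    // ... the remaining fixed attributes would go here ...
    for (auto & o : outputs)
        attrs.emplace_back(o, "<store path>");
    return attrs;
}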

View file

@ -1,4 +1,4 @@
-ifeq ($(OS), Darwin)
+ifdef HOST_DARWIN
programs += resolve-system-dependencies
endif

View file

@ -1,6 +1,6 @@
set -e

-export TEST_ROOT=$(realpath ${TMPDIR:-/tmp}/nix-test)
+export TEST_ROOT=$(realpath ${TMPDIR:-/tmp}/nix-test)/${TEST_NAME:-default}
export NIX_STORE_DIR
if ! NIX_STORE_DIR=$(readlink -f $TEST_ROOT/store 2> /dev/null); then
    # Maybe the build directory is symlinked.
@ -11,6 +11,7 @@ export NIX_LOCALSTATE_DIR=$TEST_ROOT/var
export NIX_LOG_DIR=$TEST_ROOT/var/log/nix
export NIX_STATE_DIR=$TEST_ROOT/var/nix
export NIX_CONF_DIR=$TEST_ROOT/etc
+export NIX_DAEMON_SOCKET_PATH=$TEST_ROOT/daemon-socket
export _NIX_TEST_SHARED=$TEST_ROOT/shared
if [[ -n $NIX_STORE ]]; then
    export _NIX_TEST_NO_SANDBOX=1
@ -72,7 +73,7 @@ startDaemon() {
    rm -f $NIX_STATE_DIR/daemon-socket/socket
    nix-daemon &
    for ((i = 0; i < 30; i++)); do
-       if [ -e $NIX_STATE_DIR/daemon-socket/socket ]; then break; fi
+       if [ -e $NIX_DAEMON_SOCKET_PATH ]; then break; fi
        sleep 1
    done
    pidDaemon=$!

View file

@ -12,6 +12,7 @@ nix_tests = \
  timeout.sh secure-drv-outputs.sh nix-channel.sh \
  multiple-outputs.sh import-derivation.sh fetchurl.sh optimise-store.sh \
  binary-cache.sh nix-profile.sh repair.sh dump-db.sh case-hack.sh \
+ substitute-with-invalid-ca.sh \
  check-reqs.sh pass-as-file.sh tarball.sh restricted.sh \
  placeholders.sh nix-shell.sh \
  linux-sandbox.sh \
@ -39,4 +40,4 @@ tests-environment = NIX_REMOTE= $(bash) -e
clean-files += $(d)/common.sh

-installcheck: $(d)/common.sh $(d)/plugins/libplugintest.$(SO_EXT)
+test-deps += tests/common.sh tests/plugins/libplugintest.$(SO_EXT)

View file

@ -2,7 +2,9 @@
{ nixpkgs, system, nix }:

-with import (nixpkgs + "/nixos/lib/testing.nix") { inherit system; };
+with import (nixpkgs + "/nixos/lib/testing-python.nix") {
+  inherit system;
+};

makeTest (let pkgA = pkgs.cowsay; pkgB = pkgs.wget; pkgC = pkgs.hello; in {
@ -24,41 +26,46 @@ makeTest (let pkgA = pkgs.cowsay; pkgB = pkgs.wget; pkgC = pkgs.hello; in {
    };
  };

-  testScript = { nodes }:
-    ''
-      startAll;
-
-      # Create an SSH key on the client.
-      my $key = `${pkgs.openssh}/bin/ssh-keygen -t ed25519 -f key -N ""`;
-      $client->succeed("mkdir -m 700 /root/.ssh");
-      $client->copyFileFromHost("key", "/root/.ssh/id_ed25519");
-      $client->succeed("chmod 600 /root/.ssh/id_ed25519");
-
-      # Install the SSH key on the server.
-      $server->succeed("mkdir -m 700 /root/.ssh");
-      $server->copyFileFromHost("key.pub", "/root/.ssh/authorized_keys");
-      $server->waitForUnit("sshd");
-      $client->waitForUnit("network.target");
-      $client->succeed("ssh -o StrictHostKeyChecking=no " . $server->name() . " 'echo hello world'");
-
-      # Copy the closure of package A from the client to the server.
-      $server->fail("nix-store --check-validity ${pkgA}");
-      $client->succeed("nix-copy-closure --to server --gzip ${pkgA} >&2");
-      $server->succeed("nix-store --check-validity ${pkgA}");
-
-      # Copy the closure of package B from the server to the client.
-      $client->fail("nix-store --check-validity ${pkgB}");
-      $client->succeed("nix-copy-closure --from server --gzip ${pkgB} >&2");
-      $client->succeed("nix-store --check-validity ${pkgB}");
-
-      # Copy the closure of package C via the SSH substituter.
-      $client->fail("nix-store -r ${pkgC}");
-      # FIXME
-      #$client->succeed(
-      #  "nix-store --option use-ssh-substituter true"
-      #  . " --option ssh-substituter-hosts root\@server"
-      #  . " -r ${pkgC} >&2");
-      #$client->succeed("nix-store --check-validity ${pkgC}");
-    '';
+  testScript = { nodes }: ''
+    # fmt: off
+    import subprocess
+
+    start_all()
+
+    # Create an SSH key on the client.
+    subprocess.run([
+      "${pkgs.openssh}/bin/ssh-keygen", "-t", "ed25519", "-f", "key", "-N", ""
+    ], capture_output=True, check=True)
+
+    client.succeed("mkdir -m 700 /root/.ssh")
+    client.copy_from_host("key", "/root/.ssh/id_ed25519")
+    client.succeed("chmod 600 /root/.ssh/id_ed25519")
+
+    # Install the SSH key on the server.
+    server.succeed("mkdir -m 700 /root/.ssh")
+    server.copy_from_host("key.pub", "/root/.ssh/authorized_keys")
+    server.wait_for_unit("sshd")
+    client.wait_for_unit("network.target")
+    client.succeed(f"ssh -o StrictHostKeyChecking=no {server.name} 'echo hello world'")
+
+    # Copy the closure of package A from the client to the server.
+    server.fail("nix-store --check-validity ${pkgA}")
+    client.succeed("nix-copy-closure --to server --gzip ${pkgA} >&2")
+    server.succeed("nix-store --check-validity ${pkgA}")
+
+    # Copy the closure of package B from the server to the client.
+    client.fail("nix-store --check-validity ${pkgB}")
+    client.succeed("nix-copy-closure --from server --gzip ${pkgB} >&2")
+    client.succeed("nix-store --check-validity ${pkgB}")
+
+    # Copy the closure of package C via the SSH substituter.
+    client.fail("nix-store -r ${pkgC}")
+    # FIXME
+    # client.succeed(
+    #   "nix-store --option use-ssh-substituter true"
+    #   " --option ssh-substituter-hosts root\@server"
+    #   " -r ${pkgC} >&2"
+    # )
+    # client.succeed("nix-store --check-validity ${pkgC}")
+  '';
})

View file

@ -2,7 +2,9 @@
{ nixpkgs, system, nix }:

-with import (nixpkgs + "/nixos/lib/testing.nix") { inherit system; };
+with import (nixpkgs + "/nixos/lib/testing-python.nix") {
+  inherit system;
+};

makeTest (
@ -65,44 +67,46 @@ in
    };
  };

-  testScript = { nodes }:
-    ''
-      startAll;
-
-      # Create an SSH key on the client.
-      my $key = `${pkgs.openssh}/bin/ssh-keygen -t ed25519 -f key -N ""`;
-      $client->succeed("mkdir -p -m 700 /root/.ssh");
-      $client->copyFileFromHost("key", "/root/.ssh/id_ed25519");
-      $client->succeed("chmod 600 /root/.ssh/id_ed25519");
-
-      # Install the SSH key on the builders.
-      $client->waitForUnit("network.target");
-      foreach my $builder ($builder1, $builder2) {
-          $builder->succeed("mkdir -p -m 700 /root/.ssh");
-          $builder->copyFileFromHost("key.pub", "/root/.ssh/authorized_keys");
-          $builder->waitForUnit("sshd");
-          $client->succeed("ssh -o StrictHostKeyChecking=no " . $builder->name() . " 'echo hello world'");
-      }
-
-      # Perform a build and check that it was performed on the builder.
-      my $out = $client->succeed(
-        "nix-build ${expr nodes.client.config 1} 2> build-output",
-        "grep -q Hello build-output"
-      );
-      $builder1->succeed("test -e $out");
-
-      # And a parallel build.
-      my ($out1, $out2) = split /\s/,
-        $client->succeed('nix-store -r $(nix-instantiate ${expr nodes.client.config 2})\!out $(nix-instantiate ${expr nodes.client.config 3})\!out');
-      $builder1->succeed("test -e $out1 -o -e $out2");
-      $builder2->succeed("test -e $out1 -o -e $out2");
-
-      # And a failing build.
-      $client->fail("nix-build ${expr nodes.client.config 5}");
-
-      # Test whether the build hook automatically skips unavailable builders.
-      $builder1->block;
-      $client->succeed("nix-build ${expr nodes.client.config 4}");
-    '';
+  testScript = { nodes }: ''
+    # fmt: off
+    import subprocess
+
+    start_all()
+
+    # Create an SSH key on the client.
+    subprocess.run([
+      "${pkgs.openssh}/bin/ssh-keygen", "-t", "ed25519", "-f", "key", "-N", ""
+    ], capture_output=True, check=True)
+    client.succeed("mkdir -p -m 700 /root/.ssh")
+    client.copy_from_host("key", "/root/.ssh/id_ed25519")
+    client.succeed("chmod 600 /root/.ssh/id_ed25519")
+
+    # Install the SSH key on the builders.
+    client.wait_for_unit("network.target")
+    for builder in [builder1, builder2]:
+      builder.succeed("mkdir -p -m 700 /root/.ssh")
+      builder.copy_from_host("key.pub", "/root/.ssh/authorized_keys")
+      builder.wait_for_unit("sshd")
+      client.succeed(f"ssh -o StrictHostKeyChecking=no {builder.name} 'echo hello world'")
+
+    # Perform a build and check that it was performed on the builder.
+    out = client.succeed(
+      "nix-build ${expr nodes.client.config 1} 2> build-output",
+      "grep -q Hello build-output"
+    )
+    builder1.succeed(f"test -e {out}")
+
+    # And a parallel build.
+    paths = client.succeed(r'nix-store -r $(nix-instantiate ${expr nodes.client.config 2})\!out $(nix-instantiate ${expr nodes.client.config 3})\!out')
+    out1, out2 = paths.split()
+    builder1.succeed(f"test -e {out1} -o -e {out2}")
+    builder2.succeed(f"test -e {out1} -o -e {out2}")
+
+    # And a failing build.
+    client.fail("nix-build ${expr nodes.client.config 5}")
+
+    # Test whether the build hook automatically skips unavailable builders.
+    builder1.block()
+    client.succeed("nix-build ${expr nodes.client.config 4}")
+  '';
})

View file

@ -2,7 +2,7 @@
{ nixpkgs, system, nix }:

-with import (nixpkgs + "/nixos/lib/testing.nix") { inherit system; };
+with import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; };

makeTest {
@ -15,94 +15,109 @@ makeTest {
    virtualisation.pathsInNixDB = [ pkgs.stdenv pkgs.pkgsi686Linux.stdenv ];
  };

-  testScript = { nodes }:
-    ''
-      startAll;
-
-      # Copying to /tmp should succeed.
-      $machine->succeed('nix-build --no-sandbox -E \'(with import <nixpkgs> {}; runCommand "foo" {} "
-        mkdir -p $out
-        cp ${pkgs.coreutils}/bin/id /tmp/id
-      ")\' ');
-
-      $machine->succeed('[[ $(stat -c %a /tmp/id) = 555 ]]');
-      $machine->succeed("rm /tmp/id");
-
-      # Creating a setuid binary should fail.
-      $machine->fail('nix-build --no-sandbox -E \'(with import <nixpkgs> {}; runCommand "foo" {} "
-        mkdir -p $out
-        cp ${pkgs.coreutils}/bin/id /tmp/id
-        chmod 4755 /tmp/id
-      ")\' ');
-
-      $machine->succeed('[[ $(stat -c %a /tmp/id) = 555 ]]');
-      $machine->succeed("rm /tmp/id");
-
-      # Creating a setgid binary should fail.
-      $machine->fail('nix-build --no-sandbox -E \'(with import <nixpkgs> {}; runCommand "foo" {} "
-        mkdir -p $out
-        cp ${pkgs.coreutils}/bin/id /tmp/id
-        chmod 2755 /tmp/id
-      ")\' ');
-
-      $machine->succeed('[[ $(stat -c %a /tmp/id) = 555 ]]');
-      $machine->succeed("rm /tmp/id");
-
-      # The checks should also work on 32-bit binaries.
-      $machine->fail('nix-build --no-sandbox -E \'(with import <nixpkgs> { system = "i686-linux"; }; runCommand "foo" {} "
-        mkdir -p $out
-        cp ${pkgs.coreutils}/bin/id /tmp/id
-        chmod 2755 /tmp/id
-      ")\' ');
-
-      $machine->succeed('[[ $(stat -c %a /tmp/id) = 555 ]]');
-      $machine->succeed("rm /tmp/id");
-
-      # The tests above use fchmodat(). Test chmod() as well.
-      $machine->succeed('nix-build --no-sandbox -E \'(with import <nixpkgs> {}; runCommand "foo" { buildInputs = [ perl ]; } "
-        mkdir -p $out
-        cp ${pkgs.coreutils}/bin/id /tmp/id
-        perl -e \"chmod 0666, qw(/tmp/id) or die\"
-      ")\' ');
-
-      $machine->succeed('[[ $(stat -c %a /tmp/id) = 666 ]]');
-      $machine->succeed("rm /tmp/id");
-
-      $machine->fail('nix-build --no-sandbox -E \'(with import <nixpkgs> {}; runCommand "foo" { buildInputs = [ perl ]; } "
-        mkdir -p $out
-        cp ${pkgs.coreutils}/bin/id /tmp/id
-        perl -e \"chmod 04755, qw(/tmp/id) or die\"
-      ")\' ');
-
-      $machine->succeed('[[ $(stat -c %a /tmp/id) = 555 ]]');
-      $machine->succeed("rm /tmp/id");
-
-      # And test fchmod().
-      $machine->succeed('nix-build --no-sandbox -E \'(with import <nixpkgs> {}; runCommand "foo" { buildInputs = [ perl ]; } "
-        mkdir -p $out
-        cp ${pkgs.coreutils}/bin/id /tmp/id
-        perl -e \"my \\\$x; open \\\$x, qw(/tmp/id); chmod 01750, \\\$x or die\"
-      ")\' ');
-
-      $machine->succeed('[[ $(stat -c %a /tmp/id) = 1750 ]]');
-      $machine->succeed("rm /tmp/id");
-
-      $machine->fail('nix-build --no-sandbox -E \'(with import <nixpkgs> {}; runCommand "foo" { buildInputs = [ perl ]; } "
-        mkdir -p $out
-        cp ${pkgs.coreutils}/bin/id /tmp/id
-        perl -e \"my \\\$x; open \\\$x, qw(/tmp/id); chmod 04777, \\\$x or die\"
-      ")\' ');
-
-      $machine->succeed('[[ $(stat -c %a /tmp/id) = 555 ]]');
-      $machine->succeed("rm /tmp/id");
-    '';
+  testScript = { nodes }: ''
+    # fmt: off
+    start_all()
+
+    # Copying to /tmp should succeed.
+    machine.succeed(r"""
+    nix-build --no-sandbox -E '(with import <nixpkgs> {}; runCommand "foo" {} "
+      mkdir -p $out
+      cp ${pkgs.coreutils}/bin/id /tmp/id
+    ")'
+    """.strip())
+
+    machine.succeed('[[ $(stat -c %a /tmp/id) = 555 ]]')
+    machine.succeed("rm /tmp/id")
+
+    # Creating a setuid binary should fail.
+    machine.fail(r"""
+    nix-build --no-sandbox -E '(with import <nixpkgs> {}; runCommand "foo" {} "
+      mkdir -p $out
+      cp ${pkgs.coreutils}/bin/id /tmp/id
+      chmod 4755 /tmp/id
+    ")'
+    """.strip())
+
+    machine.succeed('[[ $(stat -c %a /tmp/id) = 555 ]]')
+    machine.succeed("rm /tmp/id")
+
+    # Creating a setgid binary should fail.
+    machine.fail(r"""
+    nix-build --no-sandbox -E '(with import <nixpkgs> {}; runCommand "foo" {} "
+      mkdir -p $out
+      cp ${pkgs.coreutils}/bin/id /tmp/id
+      chmod 2755 /tmp/id
+    ")'
+    """.strip())
+
+    machine.succeed('[[ $(stat -c %a /tmp/id) = 555 ]]')
+    machine.succeed("rm /tmp/id")
+
+    # The checks should also work on 32-bit binaries.
+    machine.fail(r"""
+    nix-build --no-sandbox -E '(with import <nixpkgs> { system = "i686-linux"; }; runCommand "foo" {} "
+      mkdir -p $out
+      cp ${pkgs.coreutils}/bin/id /tmp/id
+      chmod 2755 /tmp/id
+    ")'
+    """.strip())
+
+    machine.succeed('[[ $(stat -c %a /tmp/id) = 555 ]]')
+    machine.succeed("rm /tmp/id")
+
+    # The tests above use fchmodat(). Test chmod() as well.
+    machine.succeed(r"""
+    nix-build --no-sandbox -E '(with import <nixpkgs> {}; runCommand "foo" { buildInputs = [ perl ]; } "
+      mkdir -p $out
+      cp ${pkgs.coreutils}/bin/id /tmp/id
+      perl -e \"chmod 0666, qw(/tmp/id) or die\"
+    ")'
+    """.strip())
+
+    machine.succeed('[[ $(stat -c %a /tmp/id) = 666 ]]')
+    machine.succeed("rm /tmp/id")
+
+    machine.fail(r"""
+    nix-build --no-sandbox -E '(with import <nixpkgs> {}; runCommand "foo" { buildInputs = [ perl ]; } "
+      mkdir -p $out
+      cp ${pkgs.coreutils}/bin/id /tmp/id
+      perl -e \"chmod 04755, qw(/tmp/id) or die\"
+    ")'
+    """.strip())
+
+    machine.succeed('[[ $(stat -c %a /tmp/id) = 555 ]]')
+    machine.succeed("rm /tmp/id")
+
+    # And test fchmod().
+    machine.succeed(r"""
+    nix-build --no-sandbox -E '(with import <nixpkgs> {}; runCommand "foo" { buildInputs = [ perl ]; } "
+      mkdir -p $out
+      cp ${pkgs.coreutils}/bin/id /tmp/id
+      perl -e \"my \\\$x; open \\\$x, qw(/tmp/id); chmod 01750, \\\$x or die\"
+    ")'
+    """.strip())
+
+    machine.succeed('[[ $(stat -c %a /tmp/id) = 1750 ]]')
+    machine.succeed("rm /tmp/id")
+
+    machine.fail(r"""
+    nix-build --no-sandbox -E '(with import <nixpkgs> {}; runCommand "foo" { buildInputs = [ perl ]; } "
+      mkdir -p $out
+      cp ${pkgs.coreutils}/bin/id /tmp/id
+      perl -e \"my \\\$x; open \\\$x, qw(/tmp/id); chmod 04777, \\\$x or die\"
+    ")'
+    """.strip())
+
+    machine.succeed('[[ $(stat -c %a /tmp/id) = 555 ]]')
+    machine.succeed("rm /tmp/id")
+  '';
}

View file

@ -0,0 +1,38 @@
source common.sh
BINARY_CACHE=file://$cacheDir
getHash() {
basename "$1" | cut -d '-' -f 1
}
getRemoteNarInfo () {
echo "$cacheDir/$(getHash "$1").narinfo"
}
cat <<EOF > $TEST_HOME/good.txt
Im a good path
EOF
cat <<EOF > $TEST_HOME/bad.txt
Im a bad path
EOF
good=$(nix-store --add $TEST_HOME/good.txt)
bad=$(nix-store --add $TEST_HOME/bad.txt)
nix copy --to "$BINARY_CACHE" "$good"
nix copy --to "$BINARY_CACHE" "$bad"
nix-collect-garbage >/dev/null 2>&1
# Falsifying the narinfo file for '$good'
goodPathNarInfo=$(getRemoteNarInfo "$good")
badPathNarInfo=$(getRemoteNarInfo "$bad")
for fieldName in URL FileHash FileSize NarHash NarSize; do
sed -i "/^$fieldName/d" "$goodPathNarInfo"
grep -E "^$fieldName" "$badPathNarInfo" >> "$goodPathNarInfo"
done
# Copying back '$good' from the binary cache. This should fail as it is
# corrupted
if nix copy --from "$BINARY_CACHE" "$good"; then
fail "Importing a path with a wrong CA field should fail"
fi

View file

@ -11,7 +11,7 @@ cp dependencies.nix $tarroot/default.nix
cp config.nix dependencies.builder*.sh $tarroot/

tarball=$TEST_ROOT/tarball.tar.xz
-(cd $TEST_ROOT && tar c tarball) | xz > $tarball
+(cd $TEST_ROOT && tar cf - tarball) | xz > $tarball

nix-env -f file://$tarball -qa --out-path | grep -q dependencies